aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2008-04-18 21:02:35 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-04-18 21:02:35 -0400
commit334d094504c2fe1c44211ecb49146ae6bca8c321 (patch)
treed3c0f68e4b9f8e3d2ccc39e7dfe5de0534a5fad9 /net
parentd1a4be630fb068f251d64b62919f143c49ca8057 (diff)
parentd1643d24c61b725bef399cc1cf2944b4c9c23177 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.26
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.26: (1090 commits) [NET]: Fix and allocate less memory for ->priv'less netdevices [IPV6]: Fix dangling references on error in fib6_add(). [NETLABEL]: Fix NULL deref in netlbl_unlabel_staticlist_gen() if ifindex not found [PKT_SCHED]: Fix datalen check in tcf_simp_init(). [INET]: Uninline the __inet_inherit_port call. [INET]: Drop the inet_inherit_port() call. SCTP: Initialize partial_bytes_acked to 0, when all of the data is acked. [netdrvr] forcedeth: internal simplifications; changelog removal phylib: factor out get_phy_id from within get_phy_device PHY: add BCM5464 support to broadcom PHY driver cxgb3: Fix __must_check warning with dev_dbg. tc35815: Statistics cleanup natsemi: fix MMIO for PPC 44x platforms [TIPC]: Cleanup of TIPC reference table code [TIPC]: Optimized initialization of TIPC reference table [TIPC]: Remove inlining of reference table locking routines e1000: convert uint16_t style integers to u16 ixgb: convert uint16_t style integers to u16 sb1000.c: make const arrays static sb1000.c: stop inlining largish static functions ...
Diffstat (limited to 'net')
-rw-r--r--net/8021q/vlan.c147
-rw-r--r--net/8021q/vlan.h15
-rw-r--r--net/8021q/vlan_dev.c15
-rw-r--r--net/8021q/vlan_netlink.c2
-rw-r--r--net/8021q/vlanproc.c70
-rw-r--r--net/8021q/vlanproc.h10
-rw-r--r--net/9p/error.c2
-rw-r--r--net/Kconfig2
-rw-r--r--net/appletalk/aarp.c25
-rw-r--r--net/appletalk/ddp.c6
-rw-r--r--net/atm/clip.c6
-rw-r--r--net/atm/lec.c37
-rw-r--r--net/atm/mpc.c2
-rw-r--r--net/atm/proc.c48
-rw-r--r--net/atm/resources.c5
-rw-r--r--net/atm/svc.c2
-rw-r--r--net/ax25/af_ax25.c4
-rw-r--r--net/ax25/ax25_in.c2
-rw-r--r--net/bluetooth/l2cap.c2
-rw-r--r--net/bluetooth/rfcomm/sock.c2
-rw-r--r--net/bluetooth/sco.c2
-rw-r--r--net/bridge/br_netfilter.c16
-rw-r--r--net/bridge/br_netlink.c4
-rw-r--r--net/bridge/br_notify.c2
-rw-r--r--net/bridge/br_stp_bpdu.c2
-rw-r--r--net/bridge/br_sysfs_br.c6
-rw-r--r--net/bridge/netfilter/Kconfig14
-rw-r--r--net/bridge/netfilter/Makefile1
-rw-r--r--net/bridge/netfilter/ebt_nflog.c74
-rw-r--r--net/bridge/netfilter/ebtable_broute.c2
-rw-r--r--net/bridge/netfilter/ebtable_filter.c2
-rw-r--r--net/bridge/netfilter/ebtable_nat.c2
-rw-r--r--net/can/af_can.c4
-rw-r--r--net/can/bcm.c253
-rw-r--r--net/can/raw.c2
-rw-r--r--net/core/dev.c44
-rw-r--r--net/core/dev_mcast.c37
-rw-r--r--net/core/dst.c15
-rw-r--r--net/core/ethtool.c64
-rw-r--r--net/core/fib_rules.c13
-rw-r--r--net/core/filter.c57
-rw-r--r--net/core/neighbour.c171
-rw-r--r--net/core/net_namespace.c104
-rw-r--r--net/core/netpoll.c6
-rw-r--r--net/core/pktgen.c2
-rw-r--r--net/core/rtnetlink.c45
-rw-r--r--net/core/skbuff.c96
-rw-r--r--net/core/sock.c151
-rw-r--r--net/core/sysctl_net_core.c12
-rw-r--r--net/dccp/dccp.h8
-rw-r--r--net/dccp/ipv4.c79
-rw-r--r--net/dccp/ipv6.c93
-rw-r--r--net/dccp/minisocks.c2
-rw-r--r--net/dccp/output.c6
-rw-r--r--net/dccp/proto.c27
-rw-r--r--net/decnet/af_decnet.c25
-rw-r--r--net/decnet/dn_dev.c6
-rw-r--r--net/decnet/dn_fib.c4
-rw-r--r--net/decnet/dn_route.c6
-rw-r--r--net/decnet/dn_table.c2
-rw-r--r--net/econet/af_econet.c4
-rw-r--r--net/ieee80211/Kconfig1
-rw-r--r--net/ieee80211/Makefile1
-rw-r--r--net/ieee80211/softmac/Kconfig12
-rw-r--r--net/ieee80211/softmac/Makefile9
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_assoc.c489
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_auth.c413
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_event.c189
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_io.c488
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_module.c568
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_priv.h244
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_scan.c254
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_wx.c508
-rw-r--r--net/ipv4/af_inet.c70
-rw-r--r--net/ipv4/arp.c62
-rw-r--r--net/ipv4/cipso_ipv4.c1
-rw-r--r--net/ipv4/devinet.c35
-rw-r--r--net/ipv4/fib_frontend.c20
-rw-r--r--net/ipv4/fib_hash.c5
-rw-r--r--net/ipv4/fib_rules.c2
-rw-r--r--net/ipv4/fib_semantics.c3
-rw-r--r--net/ipv4/fib_trie.c240
-rw-r--r--net/ipv4/icmp.c203
-rw-r--r--net/ipv4/igmp.c45
-rw-r--r--net/ipv4/inet_connection_sock.c45
-rw-r--r--net/ipv4/inet_fragment.c10
-rw-r--r--net/ipv4/inet_hashtables.c43
-rw-r--r--net/ipv4/inet_timewait_sock.c5
-rw-r--r--net/ipv4/ip_forward.c2
-rw-r--r--net/ipv4/ip_fragment.c26
-rw-r--r--net/ipv4/ip_gre.c228
-rw-r--r--net/ipv4/ip_input.c21
-rw-r--r--net/ipv4/ip_options.c63
-rw-r--r--net/ipv4/ip_output.c28
-rw-r--r--net/ipv4/ip_sockglue.c13
-rw-r--r--net/ipv4/ipcomp.c2
-rw-r--r--net/ipv4/ipconfig.c39
-rw-r--r--net/ipv4/ipip.c220
-rw-r--r--net/ipv4/ipmr.c12
-rw-r--r--net/ipv4/ipvs/ip_vs_proto_tcp.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_proto_udp.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_sync.c4
-rw-r--r--net/ipv4/netfilter.c37
-rw-r--r--net/ipv4/netfilter/Kconfig15
-rw-r--r--net/ipv4/netfilter/Makefile5
-rw-r--r--net/ipv4/netfilter/arp_tables.c89
-rw-r--r--net/ipv4/netfilter/arpt_mangle.c12
-rw-r--r--net/ipv4/netfilter/arptable_filter.c7
-rw-r--r--net/ipv4/netfilter/ip_queue.c2
-rw-r--r--net/ipv4/netfilter/ip_tables.c53
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c14
-rw-r--r--net/ipv4/netfilter/ipt_ECN.c2
-rw-r--r--net/ipv4/netfilter/ipt_LOG.c9
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c18
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c6
-rw-r--r--net/ipv4/netfilter/ipt_recent.c6
-rw-r--r--net/ipv4/netfilter/iptable_filter.c21
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c51
-rw-r--r--net/ipv4/netfilter/iptable_raw.c8
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c70
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c15
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c27
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c61
-rw-r--r--net/ipv4/netfilter/nf_nat_helper.c5
-rw-r--r--net/ipv4/netfilter/nf_nat_pptp.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_common.c120
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_dccp.c108
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_gre.c45
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_icmp.c19
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_sctp.c96
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_tcp.c80
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_udp.c77
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_udplite.c99
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_unknown.c25
-rw-r--r--net/ipv4/netfilter/nf_nat_rule.c25
-rw-r--r--net/ipv4/netfilter/nf_nat_sip.c556
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c29
-rw-r--r--net/ipv4/netfilter/nf_nat_standalone.c76
-rw-r--r--net/ipv4/proc.c71
-rw-r--r--net/ipv4/raw.c47
-rw-r--r--net/ipv4/route.c278
-rw-r--r--net/ipv4/syncookies.c102
-rw-r--r--net/ipv4/sysctl_net_ipv4.c162
-rw-r--r--net/ipv4/tcp.c18
-rw-r--r--net/ipv4/tcp_cubic.c35
-rw-r--r--net/ipv4/tcp_input.c76
-rw-r--r--net/ipv4/tcp_ipv4.c193
-rw-r--r--net/ipv4/tcp_minisocks.c36
-rw-r--r--net/ipv4/tcp_output.c13
-rw-r--r--net/ipv4/tcp_probe.c2
-rw-r--r--net/ipv4/tcp_timer.c19
-rw-r--r--net/ipv4/tunnel4.c2
-rw-r--r--net/ipv4/udp.c138
-rw-r--r--net/ipv4/udp_impl.h6
-rw-r--r--net/ipv4/udplite.c62
-rw-r--r--net/ipv4/xfrm4_policy.c2
-rw-r--r--net/ipv6/Kconfig41
-rw-r--r--net/ipv6/Makefile3
-rw-r--r--net/ipv6/addrconf.c765
-rw-r--r--net/ipv6/addrlabel.c25
-rw-r--r--net/ipv6/af_inet6.c227
-rw-r--r--net/ipv6/anycast.c63
-rw-r--r--net/ipv6/fib6_rules.c113
-rw-r--r--net/ipv6/icmp.c145
-rw-r--r--net/ipv6/inet6_connection_sock.c4
-rw-r--r--net/ipv6/inet6_hashtables.c10
-rw-r--r--net/ipv6/ip6_fib.c298
-rw-r--r--net/ipv6/ip6_flowlabel.c101
-rw-r--r--net/ipv6/ip6_input.c98
-rw-r--r--net/ipv6/ip6_output.c40
-rw-r--r--net/ipv6/ip6_tunnel.c228
-rw-r--r--net/ipv6/ip6mr.c1643
-rw-r--r--net/ipv6/ipv6_sockglue.c365
-rw-r--r--net/ipv6/mcast.c272
-rw-r--r--net/ipv6/mip6.c24
-rw-r--r--net/ipv6/ndisc.c237
-rw-r--r--net/ipv6/netfilter.c46
-rw-r--r--net/ipv6/netfilter/ip6_queue.c2
-rw-r--r--net/ipv6/netfilter/ip6_tables.c51
-rw-r--r--net/ipv6/netfilter/ip6t_LOG.c6
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c7
-rw-r--r--net/ipv6/netfilter/ip6t_ipv6header.c3
-rw-r--r--net/ipv6/netfilter/ip6t_rt.c3
-rw-r--r--net/ipv6/netfilter/ip6table_filter.c2
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c2
-rw-r--r--net/ipv6/netfilter/ip6table_raw.c2
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c14
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c29
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c23
-rw-r--r--net/ipv6/proc.c73
-rw-r--r--net/ipv6/raw.c69
-rw-r--r--net/ipv6/reassembly.c19
-rw-r--r--net/ipv6/route.c640
-rw-r--r--net/ipv6/sit.c456
-rw-r--r--net/ipv6/syncookies.c279
-rw-r--r--net/ipv6/sysctl_net_ipv6.c15
-rw-r--r--net/ipv6/tcp_ipv6.c199
-rw-r--r--net/ipv6/udp.c44
-rw-r--r--net/ipv6/udp_impl.h2
-rw-r--r--net/ipv6/udplite.c40
-rw-r--r--net/ipv6/xfrm6_input.c55
-rw-r--r--net/ipv6/xfrm6_policy.c9
-rw-r--r--net/ipv6/xfrm6_state.c171
-rw-r--r--net/ipv6/xfrm6_tunnel.c45
-rw-r--r--net/ipx/af_ipx.c4
-rw-r--r--net/irda/af_irda.c180
-rw-r--r--net/irda/discovery.c4
-rw-r--r--net/irda/ircomm/ircomm_core.c46
-rw-r--r--net/irda/ircomm/ircomm_event.c12
-rw-r--r--net/irda/ircomm/ircomm_lmp.c30
-rw-r--r--net/irda/ircomm/ircomm_param.c32
-rw-r--r--net/irda/ircomm/ircomm_ttp.c26
-rw-r--r--net/irda/ircomm/ircomm_tty.c90
-rw-r--r--net/irda/ircomm/ircomm_tty_attach.c78
-rw-r--r--net/irda/ircomm/ircomm_tty_ioctl.c18
-rw-r--r--net/irda/irda_device.c18
-rw-r--r--net/irda/iriap.c76
-rw-r--r--net/irda/iriap_event.c38
-rw-r--r--net/irda/irias_object.c44
-rw-r--r--net/irda/irlan/irlan_client.c48
-rw-r--r--net/irda/irlan/irlan_client_event.c64
-rw-r--r--net/irda/irlan/irlan_common.c82
-rw-r--r--net/irda/irlan/irlan_eth.c51
-rw-r--r--net/irda/irlan/irlan_event.c4
-rw-r--r--net/irda/irlan/irlan_filter.c4
-rw-r--r--net/irda/irlan/irlan_provider.c30
-rw-r--r--net/irda/irlan/irlan_provider_event.c16
-rw-r--r--net/irda/irlap.c56
-rw-r--r--net/irda/irlap_event.c122
-rw-r--r--net/irda/irlap_frame.c76
-rw-r--r--net/irda/irlmp.c112
-rw-r--r--net/irda/irlmp_event.c94
-rw-r--r--net/irda/irlmp_frame.c36
-rw-r--r--net/irda/irmod.c2
-rw-r--r--net/irda/irnet/irnet.h10
-rw-r--r--net/irda/irnetlink.c4
-rw-r--r--net/irda/irqueue.c8
-rw-r--r--net/irda/irttp.c108
-rw-r--r--net/irda/parameters.c42
-rw-r--r--net/irda/qos.c24
-rw-r--r--net/irda/wrapper.c18
-rw-r--r--net/iucv/af_iucv.c2
-rw-r--r--net/iucv/iucv.c17
-rw-r--r--net/key/af_key.c143
-rw-r--r--net/llc/af_llc.c16
-rw-r--r--net/llc/llc_c_ac.c2
-rw-r--r--net/llc/llc_c_ev.c8
-rw-r--r--net/llc/llc_conn.c26
-rw-r--r--net/llc/llc_input.c6
-rw-r--r--net/mac80211/Kconfig33
-rw-r--r--net/mac80211/Makefile21
-rw-r--r--net/mac80211/aes_ccm.c2
-rw-r--r--net/mac80211/cfg.c458
-rw-r--r--net/mac80211/debugfs.c53
-rw-r--r--net/mac80211/debugfs_key.c42
-rw-r--r--net/mac80211/debugfs_key.h11
-rw-r--r--net/mac80211/debugfs_netdev.c218
-rw-r--r--net/mac80211/debugfs_sta.c181
-rw-r--r--net/mac80211/debugfs_sta.h2
-rw-r--r--net/mac80211/ieee80211_i.h525
-rw-r--r--net/mac80211/iface.c (renamed from net/mac80211/ieee80211_iface.c)48
-rw-r--r--net/mac80211/key.c393
-rw-r--r--net/mac80211/key.h (renamed from net/mac80211/ieee80211_key.h)42
-rw-r--r--net/mac80211/led.c (renamed from net/mac80211/ieee80211_led.c)2
-rw-r--r--net/mac80211/led.h (renamed from net/mac80211/ieee80211_led.h)0
-rw-r--r--net/mac80211/main.c (renamed from net/mac80211/ieee80211.c)1061
-rw-r--r--net/mac80211/mesh.c449
-rw-r--r--net/mac80211/mesh.h290
-rw-r--r--net/mac80211/mesh_hwmp.c855
-rw-r--r--net/mac80211/mesh_pathtbl.c516
-rw-r--r--net/mac80211/mesh_plink.c762
-rw-r--r--net/mac80211/mlme.c (renamed from net/mac80211/ieee80211_sta.c)1831
-rw-r--r--net/mac80211/rate.c (renamed from net/mac80211/ieee80211_rate.c)25
-rw-r--r--net/mac80211/rate.h (renamed from net/mac80211/ieee80211_rate.h)44
-rw-r--r--net/mac80211/rc80211_pid_algo.c124
-rw-r--r--net/mac80211/rc80211_pid_debugfs.c2
-rw-r--r--net/mac80211/rc80211_simple.c400
-rw-r--r--net/mac80211/regdomain.c152
-rw-r--r--net/mac80211/rx.c869
-rw-r--r--net/mac80211/sta_info.c698
-rw-r--r--net/mac80211/sta_info.h357
-rw-r--r--net/mac80211/tkip.c70
-rw-r--r--net/mac80211/tkip.h4
-rw-r--r--net/mac80211/tx.c732
-rw-r--r--net/mac80211/util.c172
-rw-r--r--net/mac80211/wep.c40
-rw-r--r--net/mac80211/wep.h10
-rw-r--r--net/mac80211/wext.c (renamed from net/mac80211/ieee80211_ioctl.c)270
-rw-r--r--net/mac80211/wme.c139
-rw-r--r--net/mac80211/wme.h23
-rw-r--r--net/mac80211/wpa.c152
-rw-r--r--net/mac80211/wpa.h24
-rw-r--r--net/netfilter/Kconfig10
-rw-r--r--net/netfilter/Makefile1
-rw-r--r--net/netfilter/core.c8
-rw-r--r--net/netfilter/nf_conntrack_amanda.c18
-rw-r--r--net/netfilter/nf_conntrack_core.c29
-rw-r--r--net/netfilter/nf_conntrack_expect.c81
-rw-r--r--net/netfilter/nf_conntrack_extend.c3
-rw-r--r--net/netfilter/nf_conntrack_ftp.c19
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c95
-rw-r--r--net/netfilter/nf_conntrack_helper.c5
-rw-r--r--net/netfilter/nf_conntrack_irc.c24
-rw-r--r--net/netfilter/nf_conntrack_l3proto_generic.c12
-rw-r--r--net/netfilter/nf_conntrack_netbios_ns.c11
-rw-r--r--net/netfilter/nf_conntrack_netlink.c34
-rw-r--r--net/netfilter/nf_conntrack_pptp.c18
-rw-r--r--net/netfilter/nf_conntrack_proto.c9
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c815
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c20
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c31
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c50
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c64
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c18
-rw-r--r--net/netfilter/nf_conntrack_proto_udplite.c55
-rw-r--r--net/netfilter/nf_conntrack_sane.c14
-rw-r--r--net/netfilter/nf_conntrack_sip.c1391
-rw-r--r--net/netfilter/nf_conntrack_standalone.c133
-rw-r--r--net/netfilter/nf_conntrack_tftp.c19
-rw-r--r--net/netfilter/nf_sockopt.c2
-rw-r--r--net/netfilter/nfnetlink_queue.c2
-rw-r--r--net/netfilter/x_tables.c22
-rw-r--r--net/netfilter/xt_CONNSECMARK.c2
-rw-r--r--net/netfilter/xt_RATEEST.c2
-rw-r--r--net/netfilter/xt_connlimit.c10
-rw-r--r--net/netfilter/xt_conntrack.c4
-rw-r--r--net/netfilter/xt_dccp.c3
-rw-r--r--net/netfilter/xt_esp.c3
-rw-r--r--net/netfilter/xt_multiport.c6
-rw-r--r--net/netfilter/xt_policy.c2
-rw-r--r--net/netfilter/xt_rateest.c4
-rw-r--r--net/netfilter/xt_sctp.c6
-rw-r--r--net/netfilter/xt_tcpmss.c6
-rw-r--r--net/netfilter/xt_tcpudp.c9
-rw-r--r--net/netfilter/xt_time.c2
-rw-r--r--net/netlabel/netlabel_unlabeled.c6
-rw-r--r--net/netlink/af_netlink.c83
-rw-r--r--net/netrom/af_netrom.c4
-rw-r--r--net/packet/af_packet.c33
-rw-r--r--net/rose/af_rose.c4
-rw-r--r--net/rxrpc/ar-internal.h8
-rw-r--r--net/rxrpc/ar-proc.c4
-rw-r--r--net/sched/act_api.c4
-rw-r--r--net/sched/act_simple.c2
-rw-r--r--net/sched/cls_api.c4
-rw-r--r--net/sched/em_meta.c4
-rw-r--r--net/sched/sch_api.c10
-rw-r--r--net/sctp/associola.c42
-rw-r--r--net/sctp/bind_addr.c15
-rw-r--r--net/sctp/chunk.c23
-rw-r--r--net/sctp/command.c10
-rw-r--r--net/sctp/input.c3
-rw-r--r--net/sctp/ipv6.c17
-rw-r--r--net/sctp/output.c14
-rw-r--r--net/sctp/outqueue.c68
-rw-r--r--net/sctp/proc.c27
-rw-r--r--net/sctp/protocol.c50
-rw-r--r--net/sctp/sm_make_chunk.c12
-rw-r--r--net/sctp/sm_sideeffect.c29
-rw-r--r--net/sctp/sm_statefuns.c38
-rw-r--r--net/sctp/socket.c81
-rw-r--r--net/sctp/transport.c10
-rw-r--r--net/socket.c4
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c2
-rw-r--r--net/sunrpc/clnt.c44
-rw-r--r--net/sunrpc/rpc_pipe.c12
-rw-r--r--net/sunrpc/rpcb_clnt.c20
-rw-r--r--net/sunrpc/xprtsock.c2
-rw-r--r--net/tipc/core.c11
-rw-r--r--net/tipc/core.h23
-rw-r--r--net/tipc/eth_media.c4
-rw-r--r--net/tipc/link.c111
-rw-r--r--net/tipc/msg.c16
-rw-r--r--net/tipc/msg.h50
-rw-r--r--net/tipc/port.c43
-rw-r--r--net/tipc/ref.c211
-rw-r--r--net/tipc/ref.h89
-rw-r--r--net/tipc/socket.c1188
-rw-r--r--net/unix/af_unix.c32
-rw-r--r--net/wireless/Makefile2
-rw-r--r--net/wireless/core.c41
-rw-r--r--net/wireless/core.h3
-rw-r--r--net/wireless/nl80211.c552
-rw-r--r--net/wireless/reg.c162
-rw-r--r--net/wireless/util.c121
-rw-r--r--net/wireless/wext.c2
-rw-r--r--net/x25/af_x25.c4
-rw-r--r--net/x25/x25_dev.c2
-rw-r--r--net/xfrm/xfrm_policy.c156
-rw-r--r--net/xfrm/xfrm_state.c53
-rw-r--r--net/xfrm/xfrm_user.c100
391 files changed, 23572 insertions, 13850 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index b33410abfd6b..2a739adaa92b 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -32,6 +32,7 @@
32#include <linux/rtnetlink.h> 32#include <linux/rtnetlink.h>
33#include <linux/notifier.h> 33#include <linux/notifier.h>
34#include <net/net_namespace.h> 34#include <net/net_namespace.h>
35#include <net/netns/generic.h>
35 36
36#include <linux/if_vlan.h> 37#include <linux/if_vlan.h>
37#include "vlan.h" 38#include "vlan.h"
@@ -41,6 +42,8 @@
41 42
42/* Global VLAN variables */ 43/* Global VLAN variables */
43 44
45int vlan_net_id;
46
44/* Our listing of VLAN group(s) */ 47/* Our listing of VLAN group(s) */
45static struct hlist_head vlan_group_hash[VLAN_GRP_HASH_SIZE]; 48static struct hlist_head vlan_group_hash[VLAN_GRP_HASH_SIZE];
46 49
@@ -49,9 +52,6 @@ static char vlan_version[] = DRV_VERSION;
49static char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>"; 52static char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>";
50static char vlan_buggyright[] = "David S. Miller <davem@redhat.com>"; 53static char vlan_buggyright[] = "David S. Miller <davem@redhat.com>";
51 54
52/* Determines interface naming scheme. */
53unsigned short vlan_name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;
54
55static struct packet_type vlan_packet_type = { 55static struct packet_type vlan_packet_type = {
56 .type = __constant_htons(ETH_P_8021Q), 56 .type = __constant_htons(ETH_P_8021Q),
57 .func = vlan_skb_recv, /* VLAN receive method */ 57 .func = vlan_skb_recv, /* VLAN receive method */
@@ -65,14 +65,14 @@ static inline unsigned int vlan_grp_hashfn(unsigned int idx)
65} 65}
66 66
67/* Must be invoked with RCU read lock (no preempt) */ 67/* Must be invoked with RCU read lock (no preempt) */
68static struct vlan_group *__vlan_find_group(int real_dev_ifindex) 68static struct vlan_group *__vlan_find_group(struct net_device *real_dev)
69{ 69{
70 struct vlan_group *grp; 70 struct vlan_group *grp;
71 struct hlist_node *n; 71 struct hlist_node *n;
72 int hash = vlan_grp_hashfn(real_dev_ifindex); 72 int hash = vlan_grp_hashfn(real_dev->ifindex);
73 73
74 hlist_for_each_entry_rcu(grp, n, &vlan_group_hash[hash], hlist) { 74 hlist_for_each_entry_rcu(grp, n, &vlan_group_hash[hash], hlist) {
75 if (grp->real_dev_ifindex == real_dev_ifindex) 75 if (grp->real_dev == real_dev)
76 return grp; 76 return grp;
77 } 77 }
78 78
@@ -86,7 +86,7 @@ static struct vlan_group *__vlan_find_group(int real_dev_ifindex)
86struct net_device *__find_vlan_dev(struct net_device *real_dev, 86struct net_device *__find_vlan_dev(struct net_device *real_dev,
87 unsigned short VID) 87 unsigned short VID)
88{ 88{
89 struct vlan_group *grp = __vlan_find_group(real_dev->ifindex); 89 struct vlan_group *grp = __vlan_find_group(real_dev);
90 90
91 if (grp) 91 if (grp)
92 return vlan_group_get_device(grp, VID); 92 return vlan_group_get_device(grp, VID);
@@ -103,32 +103,38 @@ static void vlan_group_free(struct vlan_group *grp)
103 kfree(grp); 103 kfree(grp);
104} 104}
105 105
106static struct vlan_group *vlan_group_alloc(int ifindex) 106static struct vlan_group *vlan_group_alloc(struct net_device *real_dev)
107{ 107{
108 struct vlan_group *grp; 108 struct vlan_group *grp;
109 unsigned int size;
110 unsigned int i;
111 109
112 grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL); 110 grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
113 if (!grp) 111 if (!grp)
114 return NULL; 112 return NULL;
115 113
116 size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN; 114 grp->real_dev = real_dev;
117
118 for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++) {
119 grp->vlan_devices_arrays[i] = kzalloc(size, GFP_KERNEL);
120 if (!grp->vlan_devices_arrays[i])
121 goto err;
122 }
123
124 grp->real_dev_ifindex = ifindex;
125 hlist_add_head_rcu(&grp->hlist, 115 hlist_add_head_rcu(&grp->hlist,
126 &vlan_group_hash[vlan_grp_hashfn(ifindex)]); 116 &vlan_group_hash[vlan_grp_hashfn(real_dev->ifindex)]);
127 return grp; 117 return grp;
118}
128 119
129err: 120static int vlan_group_prealloc_vid(struct vlan_group *vg, int vid)
130 vlan_group_free(grp); 121{
131 return NULL; 122 struct net_device **array;
123 unsigned int size;
124
125 ASSERT_RTNL();
126
127 array = vg->vlan_devices_arrays[vid / VLAN_GROUP_ARRAY_PART_LEN];
128 if (array != NULL)
129 return 0;
130
131 size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
132 array = kzalloc(size, GFP_KERNEL);
133 if (array == NULL)
134 return -ENOBUFS;
135
136 vg->vlan_devices_arrays[vid / VLAN_GROUP_ARRAY_PART_LEN] = array;
137 return 0;
132} 138}
133 139
134static void vlan_rcu_free(struct rcu_head *rcu) 140static void vlan_rcu_free(struct rcu_head *rcu)
@@ -145,11 +151,9 @@ void unregister_vlan_dev(struct net_device *dev)
145 151
146 ASSERT_RTNL(); 152 ASSERT_RTNL();
147 153
148 grp = __vlan_find_group(real_dev->ifindex); 154 grp = __vlan_find_group(real_dev);
149 BUG_ON(!grp); 155 BUG_ON(!grp);
150 156
151 vlan_proc_rem_dev(dev);
152
153 /* Take it out of our own structures, but be sure to interlock with 157 /* Take it out of our own structures, but be sure to interlock with
154 * HW accelerating devices or SW vlan input packet processing. 158 * HW accelerating devices or SW vlan input packet processing.
155 */ 159 */
@@ -240,13 +244,17 @@ int register_vlan_dev(struct net_device *dev)
240 struct vlan_group *grp, *ngrp = NULL; 244 struct vlan_group *grp, *ngrp = NULL;
241 int err; 245 int err;
242 246
243 grp = __vlan_find_group(real_dev->ifindex); 247 grp = __vlan_find_group(real_dev);
244 if (!grp) { 248 if (!grp) {
245 ngrp = grp = vlan_group_alloc(real_dev->ifindex); 249 ngrp = grp = vlan_group_alloc(real_dev);
246 if (!grp) 250 if (!grp)
247 return -ENOBUFS; 251 return -ENOBUFS;
248 } 252 }
249 253
254 err = vlan_group_prealloc_vid(grp, vlan_id);
255 if (err < 0)
256 goto out_free_group;
257
250 err = register_netdevice(dev); 258 err = register_netdevice(dev);
251 if (err < 0) 259 if (err < 0)
252 goto out_free_group; 260 goto out_free_group;
@@ -268,9 +276,6 @@ int register_vlan_dev(struct net_device *dev)
268 if (real_dev->features & NETIF_F_HW_VLAN_FILTER) 276 if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
269 real_dev->vlan_rx_add_vid(real_dev, vlan_id); 277 real_dev->vlan_rx_add_vid(real_dev, vlan_id);
270 278
271 if (vlan_proc_add_dev(dev) < 0)
272 pr_warning("8021q: failed to add proc entry for %s\n",
273 dev->name);
274 return 0; 279 return 0;
275 280
276out_free_group: 281out_free_group:
@@ -286,6 +291,8 @@ static int register_vlan_device(struct net_device *real_dev,
286 unsigned short VLAN_ID) 291 unsigned short VLAN_ID)
287{ 292{
288 struct net_device *new_dev; 293 struct net_device *new_dev;
294 struct net *net = dev_net(real_dev);
295 struct vlan_net *vn = net_generic(net, vlan_net_id);
289 char name[IFNAMSIZ]; 296 char name[IFNAMSIZ];
290 int err; 297 int err;
291 298
@@ -297,7 +304,7 @@ static int register_vlan_device(struct net_device *real_dev,
297 return err; 304 return err;
298 305
299 /* Gotta set up the fields for the device. */ 306 /* Gotta set up the fields for the device. */
300 switch (vlan_name_type) { 307 switch (vn->name_type) {
301 case VLAN_NAME_TYPE_RAW_PLUS_VID: 308 case VLAN_NAME_TYPE_RAW_PLUS_VID:
302 /* name will look like: eth1.0005 */ 309 /* name will look like: eth1.0005 */
303 snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, VLAN_ID); 310 snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, VLAN_ID);
@@ -328,6 +335,7 @@ static int register_vlan_device(struct net_device *real_dev,
328 if (new_dev == NULL) 335 if (new_dev == NULL)
329 return -ENOBUFS; 336 return -ENOBUFS;
330 337
338 dev_net_set(new_dev, net);
331 /* need 4 bytes for extra VLAN header info, 339 /* need 4 bytes for extra VLAN header info,
332 * hope the underlying device can handle it. 340 * hope the underlying device can handle it.
333 */ 341 */
@@ -383,6 +391,14 @@ static void __vlan_device_event(struct net_device *dev, unsigned long event)
383 pr_warning("8021q: failed to change proc name for %s\n", 391 pr_warning("8021q: failed to change proc name for %s\n",
384 dev->name); 392 dev->name);
385 break; 393 break;
394 case NETDEV_REGISTER:
395 if (vlan_proc_add_dev(dev) < 0)
396 pr_warning("8021q: failed to add proc entry for %s\n",
397 dev->name);
398 break;
399 case NETDEV_UNREGISTER:
400 vlan_proc_rem_dev(dev);
401 break;
386 } 402 }
387} 403}
388 404
@@ -394,15 +410,12 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
394 int i, flgs; 410 int i, flgs;
395 struct net_device *vlandev; 411 struct net_device *vlandev;
396 412
397 if (dev->nd_net != &init_net)
398 return NOTIFY_DONE;
399
400 if (is_vlan_dev(dev)) { 413 if (is_vlan_dev(dev)) {
401 __vlan_device_event(dev, event); 414 __vlan_device_event(dev, event);
402 goto out; 415 goto out;
403 } 416 }
404 417
405 grp = __vlan_find_group(dev->ifindex); 418 grp = __vlan_find_group(dev);
406 if (!grp) 419 if (!grp)
407 goto out; 420 goto out;
408 421
@@ -522,7 +535,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
522 case GET_VLAN_REALDEV_NAME_CMD: 535 case GET_VLAN_REALDEV_NAME_CMD:
523 case GET_VLAN_VID_CMD: 536 case GET_VLAN_VID_CMD:
524 err = -ENODEV; 537 err = -ENODEV;
525 dev = __dev_get_by_name(&init_net, args.device1); 538 dev = __dev_get_by_name(net, args.device1);
526 if (!dev) 539 if (!dev)
527 goto out; 540 goto out;
528 541
@@ -567,7 +580,10 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
567 break; 580 break;
568 if ((args.u.name_type >= 0) && 581 if ((args.u.name_type >= 0) &&
569 (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) { 582 (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
570 vlan_name_type = args.u.name_type; 583 struct vlan_net *vn;
584
585 vn = net_generic(net, vlan_net_id);
586 vn->name_type = args.u.name_type;
571 err = 0; 587 err = 0;
572 } else { 588 } else {
573 err = -EINVAL; 589 err = -EINVAL;
@@ -615,6 +631,51 @@ out:
615 return err; 631 return err;
616} 632}
617 633
634static int vlan_init_net(struct net *net)
635{
636 int err;
637 struct vlan_net *vn;
638
639 err = -ENOMEM;
640 vn = kzalloc(sizeof(struct vlan_net), GFP_KERNEL);
641 if (vn == NULL)
642 goto err_alloc;
643
644 err = net_assign_generic(net, vlan_net_id, vn);
645 if (err < 0)
646 goto err_assign;
647
648 vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;
649
650 err = vlan_proc_init(net);
651 if (err < 0)
652 goto err_proc;
653
654 return 0;
655
656err_proc:
657 /* nothing */
658err_assign:
659 kfree(vn);
660err_alloc:
661 return err;
662}
663
664static void vlan_exit_net(struct net *net)
665{
666 struct vlan_net *vn;
667
668 vn = net_generic(net, vlan_net_id);
669 rtnl_kill_links(net, &vlan_link_ops);
670 vlan_proc_cleanup(net);
671 kfree(vn);
672}
673
674static struct pernet_operations vlan_net_ops = {
675 .init = vlan_init_net,
676 .exit = vlan_exit_net,
677};
678
618static int __init vlan_proto_init(void) 679static int __init vlan_proto_init(void)
619{ 680{
620 int err; 681 int err;
@@ -622,9 +683,9 @@ static int __init vlan_proto_init(void)
622 pr_info("%s v%s %s\n", vlan_fullname, vlan_version, vlan_copyright); 683 pr_info("%s v%s %s\n", vlan_fullname, vlan_version, vlan_copyright);
623 pr_info("All bugs added by %s\n", vlan_buggyright); 684 pr_info("All bugs added by %s\n", vlan_buggyright);
624 685
625 err = vlan_proc_init(); 686 err = register_pernet_gen_device(&vlan_net_id, &vlan_net_ops);
626 if (err < 0) 687 if (err < 0)
627 goto err1; 688 goto err0;
628 689
629 err = register_netdevice_notifier(&vlan_notifier_block); 690 err = register_netdevice_notifier(&vlan_notifier_block);
630 if (err < 0) 691 if (err < 0)
@@ -641,8 +702,8 @@ static int __init vlan_proto_init(void)
641err3: 702err3:
642 unregister_netdevice_notifier(&vlan_notifier_block); 703 unregister_netdevice_notifier(&vlan_notifier_block);
643err2: 704err2:
644 vlan_proc_cleanup(); 705 unregister_pernet_gen_device(vlan_net_id, &vlan_net_ops);
645err1: 706err0:
646 return err; 707 return err;
647} 708}
648 709
@@ -661,7 +722,7 @@ static void __exit vlan_cleanup_module(void)
661 for (i = 0; i < VLAN_GRP_HASH_SIZE; i++) 722 for (i = 0; i < VLAN_GRP_HASH_SIZE; i++)
662 BUG_ON(!hlist_empty(&vlan_group_hash[i])); 723 BUG_ON(!hlist_empty(&vlan_group_hash[i]));
663 724
664 vlan_proc_cleanup(); 725 unregister_pernet_gen_device(vlan_net_id, &vlan_net_ops);
665 726
666 synchronize_net(); 727 synchronize_net();
667} 728}
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 51271aea402b..5229a72c7ea1 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -3,8 +3,6 @@
3 3
4#include <linux/if_vlan.h> 4#include <linux/if_vlan.h>
5 5
6extern unsigned short vlan_name_type;
7
8#define VLAN_GRP_HASH_SHIFT 5 6#define VLAN_GRP_HASH_SHIFT 5
9#define VLAN_GRP_HASH_SIZE (1 << VLAN_GRP_HASH_SHIFT) 7#define VLAN_GRP_HASH_SIZE (1 << VLAN_GRP_HASH_SHIFT)
10#define VLAN_GRP_HASH_MASK (VLAN_GRP_HASH_SIZE - 1) 8#define VLAN_GRP_HASH_MASK (VLAN_GRP_HASH_SIZE - 1)
@@ -50,4 +48,17 @@ static inline int is_vlan_dev(struct net_device *dev)
50 return dev->priv_flags & IFF_802_1Q_VLAN; 48 return dev->priv_flags & IFF_802_1Q_VLAN;
51} 49}
52 50
51extern int vlan_net_id;
52
53struct proc_dir_entry;
54
55struct vlan_net {
56 /* /proc/net/vlan */
57 struct proc_dir_entry *proc_vlan_dir;
58 /* /proc/net/vlan/config */
59 struct proc_dir_entry *proc_vlan_conf;
60 /* Determines interface naming scheme. */
61 unsigned short name_type;
62};
63
53#endif /* !(__BEN_VLAN_802_1Q_INC__) */ 64#endif /* !(__BEN_VLAN_802_1Q_INC__) */
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 41a76a05e6fd..c961f0826005 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -153,9 +153,6 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
153 struct net_device_stats *stats; 153 struct net_device_stats *stats;
154 unsigned short vlan_TCI; 154 unsigned short vlan_TCI;
155 155
156 if (dev->nd_net != &init_net)
157 goto err_free;
158
159 skb = skb_share_check(skb, GFP_ATOMIC); 156 skb = skb_share_check(skb, GFP_ATOMIC);
160 if (skb == NULL) 157 if (skb == NULL)
161 goto err_free; 158 goto err_free;
@@ -171,7 +168,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
171 skb->dev = __find_vlan_dev(dev, vid); 168 skb->dev = __find_vlan_dev(dev, vid);
172 if (!skb->dev) { 169 if (!skb->dev) {
173 pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n", 170 pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
174 __FUNCTION__, (unsigned int)vid, dev->name); 171 __func__, (unsigned int)vid, dev->name);
175 goto err_unlock; 172 goto err_unlock;
176 } 173 }
177 174
@@ -187,7 +184,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
187 ntohs(vhdr->h_vlan_TCI)); 184 ntohs(vhdr->h_vlan_TCI));
188 185
189 pr_debug("%s: priority: %u for TCI: %hu\n", 186 pr_debug("%s: priority: %u for TCI: %hu\n",
190 __FUNCTION__, skb->priority, ntohs(vhdr->h_vlan_TCI)); 187 __func__, skb->priority, ntohs(vhdr->h_vlan_TCI));
191 188
192 switch (skb->pkt_type) { 189 switch (skb->pkt_type) {
193 case PACKET_BROADCAST: /* Yeah, stats collect these together.. */ 190 case PACKET_BROADCAST: /* Yeah, stats collect these together.. */
@@ -268,7 +265,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
268 struct net_device *vdev = dev; 265 struct net_device *vdev = dev;
269 266
270 pr_debug("%s: skb: %p type: %hx len: %u vlan_id: %hx, daddr: %p\n", 267 pr_debug("%s: skb: %p type: %hx len: %u vlan_id: %hx, daddr: %p\n",
271 __FUNCTION__, skb, type, len, vlan_dev_info(dev)->vlan_id, 268 __func__, skb, type, len, vlan_dev_info(dev)->vlan_id,
272 daddr); 269 daddr);
273 270
274 /* build vlan header only if re_order_header flag is NOT set. This 271 /* build vlan header only if re_order_header flag is NOT set. This
@@ -340,7 +337,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
340 return -ENOMEM; 337 return -ENOMEM;
341 } 338 }
342 vlan_dev_info(vdev)->cnt_inc_headroom_on_tx++; 339 vlan_dev_info(vdev)->cnt_inc_headroom_on_tx++;
343 pr_debug("%s: %s: had to grow skb\n", __FUNCTION__, vdev->name); 340 pr_debug("%s: %s: had to grow skb\n", __func__, vdev->name);
344 } 341 }
345 342
346 if (build_vlan_header) { 343 if (build_vlan_header) {
@@ -382,7 +379,7 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
382 vlan_dev_info(dev)->cnt_encap_on_xmit++; 379 vlan_dev_info(dev)->cnt_encap_on_xmit++;
383 380
384 pr_debug("%s: proto to encap: 0x%hx\n", 381 pr_debug("%s: proto to encap: 0x%hx\n",
385 __FUNCTION__, ntohs(veth->h_vlan_proto)); 382 __func__, ntohs(veth->h_vlan_proto));
386 /* Construct the second two bytes. This field looks something 383 /* Construct the second two bytes. This field looks something
387 * like: 384 * like:
388 * usr_priority: 3 bits (high bits) 385 * usr_priority: 3 bits (high bits)
@@ -403,7 +400,7 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
403 } 400 }
404 401
405 pr_debug("%s: about to send skb: %p to dev: %s\n", 402 pr_debug("%s: about to send skb: %p to dev: %s\n",
406 __FUNCTION__, skb, skb->dev->name); 403 __func__, skb, skb->dev->name);
407 pr_debug(" " MAC_FMT " " MAC_FMT " %4hx %4hx %4hx\n", 404 pr_debug(" " MAC_FMT " " MAC_FMT " %4hx %4hx %4hx\n",
408 veth->h_dest[0], veth->h_dest[1], veth->h_dest[2], 405 veth->h_dest[0], veth->h_dest[1], veth->h_dest[2],
409 veth->h_dest[3], veth->h_dest[4], veth->h_dest[5], 406 veth->h_dest[3], veth->h_dest[4], veth->h_dest[5],
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index e32eeb37987e..c93e69ec28ed 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -113,7 +113,7 @@ static int vlan_newlink(struct net_device *dev,
113 113
114 if (!tb[IFLA_LINK]) 114 if (!tb[IFLA_LINK])
115 return -EINVAL; 115 return -EINVAL;
116 real_dev = __dev_get_by_index(&init_net, nla_get_u32(tb[IFLA_LINK])); 116 real_dev = __dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK]));
117 if (!real_dev) 117 if (!real_dev)
118 return -ENODEV; 118 return -ENODEV;
119 119
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 9671aa51af2c..daad0064e2c2 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -34,6 +34,7 @@
34#include <linux/netdevice.h> 34#include <linux/netdevice.h>
35#include <linux/if_vlan.h> 35#include <linux/if_vlan.h>
36#include <net/net_namespace.h> 36#include <net/net_namespace.h>
37#include <net/netns/generic.h>
37#include "vlanproc.h" 38#include "vlanproc.h"
38#include "vlan.h" 39#include "vlan.h"
39 40
@@ -79,7 +80,8 @@ static const struct seq_operations vlan_seq_ops = {
79 80
80static int vlan_seq_open(struct inode *inode, struct file *file) 81static int vlan_seq_open(struct inode *inode, struct file *file)
81{ 82{
82 return seq_open(file, &vlan_seq_ops); 83 return seq_open_net(inode, file, &vlan_seq_ops,
84 sizeof(struct seq_net_private));
83} 85}
84 86
85static const struct file_operations vlan_fops = { 87static const struct file_operations vlan_fops = {
@@ -87,7 +89,7 @@ static const struct file_operations vlan_fops = {
87 .open = vlan_seq_open, 89 .open = vlan_seq_open,
88 .read = seq_read, 90 .read = seq_read,
89 .llseek = seq_lseek, 91 .llseek = seq_lseek,
90 .release = seq_release, 92 .release = seq_release_net,
91}; 93};
92 94
93/* 95/*
@@ -111,18 +113,6 @@ static const struct file_operations vlandev_fops = {
111 * Proc filesystem derectory entries. 113 * Proc filesystem derectory entries.
112 */ 114 */
113 115
114/*
115 * /proc/net/vlan
116 */
117
118static struct proc_dir_entry *proc_vlan_dir;
119
120/*
121 * /proc/net/vlan/config
122 */
123
124static struct proc_dir_entry *proc_vlan_conf;
125
126/* Strings */ 116/* Strings */
127static const char *vlan_name_type_str[VLAN_NAME_TYPE_HIGHEST] = { 117static const char *vlan_name_type_str[VLAN_NAME_TYPE_HIGHEST] = {
128 [VLAN_NAME_TYPE_RAW_PLUS_VID] = "VLAN_NAME_TYPE_RAW_PLUS_VID", 118 [VLAN_NAME_TYPE_RAW_PLUS_VID] = "VLAN_NAME_TYPE_RAW_PLUS_VID",
@@ -138,13 +128,15 @@ static const char *vlan_name_type_str[VLAN_NAME_TYPE_HIGHEST] = {
138 * Clean up /proc/net/vlan entries 128 * Clean up /proc/net/vlan entries
139 */ 129 */
140 130
141void vlan_proc_cleanup(void) 131void vlan_proc_cleanup(struct net *net)
142{ 132{
143 if (proc_vlan_conf) 133 struct vlan_net *vn = net_generic(net, vlan_net_id);
144 remove_proc_entry(name_conf, proc_vlan_dir); 134
135 if (vn->proc_vlan_conf)
136 remove_proc_entry(name_conf, vn->proc_vlan_dir);
145 137
146 if (proc_vlan_dir) 138 if (vn->proc_vlan_dir)
147 proc_net_remove(&init_net, name_root); 139 proc_net_remove(net, name_root);
148 140
149 /* Dynamically added entries should be cleaned up as their vlan_device 141 /* Dynamically added entries should be cleaned up as their vlan_device
150 * is removed, so we should not have to take care of it here... 142 * is removed, so we should not have to take care of it here...
@@ -155,21 +147,23 @@ void vlan_proc_cleanup(void)
155 * Create /proc/net/vlan entries 147 * Create /proc/net/vlan entries
156 */ 148 */
157 149
158int __init vlan_proc_init(void) 150int vlan_proc_init(struct net *net)
159{ 151{
160 proc_vlan_dir = proc_mkdir(name_root, init_net.proc_net); 152 struct vlan_net *vn = net_generic(net, vlan_net_id);
161 if (!proc_vlan_dir) 153
154 vn->proc_vlan_dir = proc_net_mkdir(net, name_root, net->proc_net);
155 if (!vn->proc_vlan_dir)
162 goto err; 156 goto err;
163 157
164 proc_vlan_conf = proc_create(name_conf, S_IFREG|S_IRUSR|S_IWUSR, 158 vn->proc_vlan_conf = proc_create(name_conf, S_IFREG|S_IRUSR|S_IWUSR,
165 proc_vlan_dir, &vlan_fops); 159 vn->proc_vlan_dir, &vlan_fops);
166 if (!proc_vlan_conf) 160 if (!vn->proc_vlan_conf)
167 goto err; 161 goto err;
168 return 0; 162 return 0;
169 163
170err: 164err:
171 pr_err("%s: can't create entry in proc filesystem!\n", __FUNCTION__); 165 pr_err("%s: can't create entry in proc filesystem!\n", __func__);
172 vlan_proc_cleanup(); 166 vlan_proc_cleanup(net);
173 return -ENOBUFS; 167 return -ENOBUFS;
174} 168}
175 169
@@ -180,9 +174,10 @@ err:
180int vlan_proc_add_dev(struct net_device *vlandev) 174int vlan_proc_add_dev(struct net_device *vlandev)
181{ 175{
182 struct vlan_dev_info *dev_info = vlan_dev_info(vlandev); 176 struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
177 struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
183 178
184 dev_info->dent = proc_create(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR, 179 dev_info->dent = proc_create(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR,
185 proc_vlan_dir, &vlandev_fops); 180 vn->proc_vlan_dir, &vlandev_fops);
186 if (!dev_info->dent) 181 if (!dev_info->dent)
187 return -ENOBUFS; 182 return -ENOBUFS;
188 183
@@ -195,10 +190,12 @@ int vlan_proc_add_dev(struct net_device *vlandev)
195 */ 190 */
196int vlan_proc_rem_dev(struct net_device *vlandev) 191int vlan_proc_rem_dev(struct net_device *vlandev)
197{ 192{
193 struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
194
198 /** NOTE: This will consume the memory pointed to by dent, it seems. */ 195 /** NOTE: This will consume the memory pointed to by dent, it seems. */
199 if (vlan_dev_info(vlandev)->dent) { 196 if (vlan_dev_info(vlandev)->dent) {
200 remove_proc_entry(vlan_dev_info(vlandev)->dent->name, 197 remove_proc_entry(vlan_dev_info(vlandev)->dent->name,
201 proc_vlan_dir); 198 vn->proc_vlan_dir);
202 vlan_dev_info(vlandev)->dent = NULL; 199 vlan_dev_info(vlandev)->dent = NULL;
203 } 200 }
204 return 0; 201 return 0;
@@ -215,6 +212,7 @@ static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
215 __acquires(dev_base_lock) 212 __acquires(dev_base_lock)
216{ 213{
217 struct net_device *dev; 214 struct net_device *dev;
215 struct net *net = seq_file_net(seq);
218 loff_t i = 1; 216 loff_t i = 1;
219 217
220 read_lock(&dev_base_lock); 218 read_lock(&dev_base_lock);
@@ -222,7 +220,7 @@ static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
222 if (*pos == 0) 220 if (*pos == 0)
223 return SEQ_START_TOKEN; 221 return SEQ_START_TOKEN;
224 222
225 for_each_netdev(&init_net, dev) { 223 for_each_netdev(net, dev) {
226 if (!is_vlan_dev(dev)) 224 if (!is_vlan_dev(dev))
227 continue; 225 continue;
228 226
@@ -236,14 +234,15 @@ static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
236static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos) 234static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
237{ 235{
238 struct net_device *dev; 236 struct net_device *dev;
237 struct net *net = seq_file_net(seq);
239 238
240 ++*pos; 239 ++*pos;
241 240
242 dev = (struct net_device *)v; 241 dev = (struct net_device *)v;
243 if (v == SEQ_START_TOKEN) 242 if (v == SEQ_START_TOKEN)
244 dev = net_device_entry(&init_net.dev_base_head); 243 dev = net_device_entry(&net->dev_base_head);
245 244
246 for_each_netdev_continue(&init_net, dev) { 245 for_each_netdev_continue(net, dev) {
247 if (!is_vlan_dev(dev)) 246 if (!is_vlan_dev(dev))
248 continue; 247 continue;
249 248
@@ -261,13 +260,16 @@ static void vlan_seq_stop(struct seq_file *seq, void *v)
261 260
262static int vlan_seq_show(struct seq_file *seq, void *v) 261static int vlan_seq_show(struct seq_file *seq, void *v)
263{ 262{
263 struct net *net = seq_file_net(seq);
264 struct vlan_net *vn = net_generic(net, vlan_net_id);
265
264 if (v == SEQ_START_TOKEN) { 266 if (v == SEQ_START_TOKEN) {
265 const char *nmtype = NULL; 267 const char *nmtype = NULL;
266 268
267 seq_puts(seq, "VLAN Dev name | VLAN ID\n"); 269 seq_puts(seq, "VLAN Dev name | VLAN ID\n");
268 270
269 if (vlan_name_type < ARRAY_SIZE(vlan_name_type_str)) 271 if (vn->name_type < ARRAY_SIZE(vlan_name_type_str))
270 nmtype = vlan_name_type_str[vlan_name_type]; 272 nmtype = vlan_name_type_str[vn->name_type];
271 273
272 seq_printf(seq, "Name-Type: %s\n", 274 seq_printf(seq, "Name-Type: %s\n",
273 nmtype ? nmtype : "UNKNOWN"); 275 nmtype ? nmtype : "UNKNOWN");
diff --git a/net/8021q/vlanproc.h b/net/8021q/vlanproc.h
index da542cacc5a5..063f60a3d5cc 100644
--- a/net/8021q/vlanproc.h
+++ b/net/8021q/vlanproc.h
@@ -2,15 +2,17 @@
2#define __BEN_VLAN_PROC_INC__ 2#define __BEN_VLAN_PROC_INC__
3 3
4#ifdef CONFIG_PROC_FS 4#ifdef CONFIG_PROC_FS
5int vlan_proc_init(void); 5struct net;
6
7int vlan_proc_init(struct net *net);
6int vlan_proc_rem_dev(struct net_device *vlandev); 8int vlan_proc_rem_dev(struct net_device *vlandev);
7int vlan_proc_add_dev(struct net_device *vlandev); 9int vlan_proc_add_dev(struct net_device *vlandev);
8void vlan_proc_cleanup(void); 10void vlan_proc_cleanup(struct net *net);
9 11
10#else /* No CONFIG_PROC_FS */ 12#else /* No CONFIG_PROC_FS */
11 13
12#define vlan_proc_init() (0) 14#define vlan_proc_init(net) (0)
13#define vlan_proc_cleanup() do {} while (0) 15#define vlan_proc_cleanup(net) do {} while (0)
14#define vlan_proc_add_dev(dev) ({(void)(dev), 0; }) 16#define vlan_proc_add_dev(dev) ({(void)(dev), 0; })
15#define vlan_proc_rem_dev(dev) ({(void)(dev), 0; }) 17#define vlan_proc_rem_dev(dev) ({(void)(dev), 0; })
16#endif 18#endif
diff --git a/net/9p/error.c b/net/9p/error.c
index ab2458b6c903..64104b9cb422 100644
--- a/net/9p/error.c
+++ b/net/9p/error.c
@@ -230,7 +230,7 @@ int p9_errstr2errno(char *errstr, int len)
230 if (errno == 0) { 230 if (errno == 0) {
231 /* TODO: if error isn't found, add it dynamically */ 231 /* TODO: if error isn't found, add it dynamically */
232 errstr[len] = 0; 232 errstr[len] = 0;
233 printk(KERN_ERR "%s: errstr :%s: not found\n", __FUNCTION__, 233 printk(KERN_ERR "%s: errstr :%s: not found\n", __func__,
234 errstr); 234 errstr);
235 errno = 1; 235 errno = 1;
236 } 236 }
diff --git a/net/Kconfig b/net/Kconfig
index 6627c6ae5db6..acbf7c60e89b 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -45,7 +45,7 @@ config INET
45 ---help--- 45 ---help---
46 These are the protocols used on the Internet and on most local 46 These are the protocols used on the Internet and on most local
47 Ethernets. It is highly recommended to say Y here (this will enlarge 47 Ethernets. It is highly recommended to say Y here (this will enlarge
48 your kernel by about 144 KB), since some programs (e.g. the X window 48 your kernel by about 400 KB), since some programs (e.g. the X window
49 system) use TCP/IP even if your machine is not connected to any 49 system) use TCP/IP even if your machine is not connected to any
50 other computer. You will get the so-called loopback device which 50 other computer. You will get the so-called loopback device which
51 allows you to ping yourself (great fun, that!). 51 allows you to ping yourself (great fun, that!).
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 18058bbc7962..25aa37ce9430 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -333,7 +333,7 @@ static int aarp_device_event(struct notifier_block *this, unsigned long event,
333 struct net_device *dev = ptr; 333 struct net_device *dev = ptr;
334 int ct; 334 int ct;
335 335
336 if (dev->nd_net != &init_net) 336 if (dev_net(dev) != &init_net)
337 return NOTIFY_DONE; 337 return NOTIFY_DONE;
338 338
339 if (event == NETDEV_DOWN) { 339 if (event == NETDEV_DOWN) {
@@ -716,7 +716,7 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev,
716 struct atalk_addr sa, *ma, da; 716 struct atalk_addr sa, *ma, da;
717 struct atalk_iface *ifa; 717 struct atalk_iface *ifa;
718 718
719 if (dev->nd_net != &init_net) 719 if (dev_net(dev) != &init_net)
720 goto out0; 720 goto out0;
721 721
722 /* We only do Ethernet SNAP AARP. */ 722 /* We only do Ethernet SNAP AARP. */
@@ -1033,25 +1033,8 @@ static const struct seq_operations aarp_seq_ops = {
1033 1033
1034static int aarp_seq_open(struct inode *inode, struct file *file) 1034static int aarp_seq_open(struct inode *inode, struct file *file)
1035{ 1035{
1036 struct seq_file *seq; 1036 return seq_open_private(file, &aarp_seq_ops,
1037 int rc = -ENOMEM; 1037 sizeof(struct aarp_iter_state));
1038 struct aarp_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
1039
1040 if (!s)
1041 goto out;
1042
1043 rc = seq_open(file, &aarp_seq_ops);
1044 if (rc)
1045 goto out_kfree;
1046
1047 seq = file->private_data;
1048 seq->private = s;
1049 memset(s, 0, sizeof(*s));
1050out:
1051 return rc;
1052out_kfree:
1053 kfree(s);
1054 goto out;
1055} 1038}
1056 1039
1057const struct file_operations atalk_seq_arp_fops = { 1040const struct file_operations atalk_seq_arp_fops = {
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 3be55c8ca4ef..44cd42f7786b 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -648,7 +648,7 @@ static int ddp_device_event(struct notifier_block *this, unsigned long event,
648{ 648{
649 struct net_device *dev = ptr; 649 struct net_device *dev = ptr;
650 650
651 if (dev->nd_net != &init_net) 651 if (dev_net(dev) != &init_net)
652 return NOTIFY_DONE; 652 return NOTIFY_DONE;
653 653
654 if (event == NETDEV_DOWN) 654 if (event == NETDEV_DOWN)
@@ -1405,7 +1405,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
1405 int origlen; 1405 int origlen;
1406 __u16 len_hops; 1406 __u16 len_hops;
1407 1407
1408 if (dev->nd_net != &init_net) 1408 if (dev_net(dev) != &init_net)
1409 goto freeit; 1409 goto freeit;
1410 1410
1411 /* Don't mangle buffer if shared */ 1411 /* Don't mangle buffer if shared */
@@ -1493,7 +1493,7 @@ freeit:
1493static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev, 1493static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev,
1494 struct packet_type *pt, struct net_device *orig_dev) 1494 struct packet_type *pt, struct net_device *orig_dev)
1495{ 1495{
1496 if (dev->nd_net != &init_net) 1496 if (dev_net(dev) != &init_net)
1497 goto freeit; 1497 goto freeit;
1498 1498
1499 /* Expand any short form frames */ 1499 /* Expand any short form frames */
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 2ab1e36098fd..6f8223ebf551 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -612,7 +612,7 @@ static int clip_device_event(struct notifier_block *this, unsigned long event,
612{ 612{
613 struct net_device *dev = arg; 613 struct net_device *dev = arg;
614 614
615 if (dev->nd_net != &init_net) 615 if (dev_net(dev) != &init_net)
616 return NOTIFY_DONE; 616 return NOTIFY_DONE;
617 617
618 if (event == NETDEV_UNREGISTER) { 618 if (event == NETDEV_UNREGISTER) {
@@ -648,10 +648,6 @@ static int clip_inet_event(struct notifier_block *this, unsigned long event,
648 struct in_device *in_dev; 648 struct in_device *in_dev;
649 649
650 in_dev = ((struct in_ifaddr *)ifa)->ifa_dev; 650 in_dev = ((struct in_ifaddr *)ifa)->ifa_dev;
651 if (!in_dev || !in_dev->dev) {
652 printk(KERN_WARNING "clip_inet_event: no device\n");
653 return NOTIFY_DONE;
654 }
655 /* 651 /*
656 * Transitions are of the down-change-up type, so it's sufficient to 652 * Transitions are of the down-change-up type, so it's sufficient to
657 * handle the change on up. 653 * handle the change on up.
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 3235c57615e4..653aca3573ac 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1023,7 +1023,7 @@ static void *lec_tbl_walk(struct lec_state *state, struct hlist_head *tbl,
1023 1023
1024 if (!e) 1024 if (!e)
1025 e = tbl->first; 1025 e = tbl->first;
1026 if (e == (void *)1) { 1026 if (e == SEQ_START_TOKEN) {
1027 e = tbl->first; 1027 e = tbl->first;
1028 --*l; 1028 --*l;
1029 } 1029 }
@@ -1125,9 +1125,9 @@ static void *lec_seq_start(struct seq_file *seq, loff_t *pos)
1125 state->locked = NULL; 1125 state->locked = NULL;
1126 state->arp_table = 0; 1126 state->arp_table = 0;
1127 state->misc_table = 0; 1127 state->misc_table = 0;
1128 state->node = (void *)1; 1128 state->node = SEQ_START_TOKEN;
1129 1129
1130 return *pos ? lec_get_idx(state, *pos) : (void *)1; 1130 return *pos ? lec_get_idx(state, *pos) : SEQ_START_TOKEN;
1131} 1131}
1132 1132
1133static void lec_seq_stop(struct seq_file *seq, void *v) 1133static void lec_seq_stop(struct seq_file *seq, void *v)
@@ -1156,7 +1156,7 @@ static int lec_seq_show(struct seq_file *seq, void *v)
1156 " Status Flags " 1156 " Status Flags "
1157 "VPI/VCI Recv VPI/VCI\n"; 1157 "VPI/VCI Recv VPI/VCI\n";
1158 1158
1159 if (v == (void *)1) 1159 if (v == SEQ_START_TOKEN)
1160 seq_puts(seq, lec_banner); 1160 seq_puts(seq, lec_banner);
1161 else { 1161 else {
1162 struct lec_state *state = seq->private; 1162 struct lec_state *state = seq->private;
@@ -1178,32 +1178,7 @@ static const struct seq_operations lec_seq_ops = {
1178 1178
1179static int lec_seq_open(struct inode *inode, struct file *file) 1179static int lec_seq_open(struct inode *inode, struct file *file)
1180{ 1180{
1181 struct lec_state *state; 1181 return seq_open_private(file, &lec_seq_ops, sizeof(struct lec_state));
1182 struct seq_file *seq;
1183 int rc = -EAGAIN;
1184
1185 state = kmalloc(sizeof(*state), GFP_KERNEL);
1186 if (!state) {
1187 rc = -ENOMEM;
1188 goto out;
1189 }
1190
1191 rc = seq_open(file, &lec_seq_ops);
1192 if (rc)
1193 goto out_kfree;
1194 seq = file->private_data;
1195 seq->private = state;
1196out:
1197 return rc;
1198
1199out_kfree:
1200 kfree(state);
1201 goto out;
1202}
1203
1204static int lec_seq_release(struct inode *inode, struct file *file)
1205{
1206 return seq_release_private(inode, file);
1207} 1182}
1208 1183
1209static const struct file_operations lec_seq_fops = { 1184static const struct file_operations lec_seq_fops = {
@@ -1211,7 +1186,7 @@ static const struct file_operations lec_seq_fops = {
1211 .open = lec_seq_open, 1186 .open = lec_seq_open,
1212 .read = seq_read, 1187 .read = seq_read,
1213 .llseek = seq_lseek, 1188 .llseek = seq_lseek,
1214 .release = lec_seq_release, 1189 .release = seq_release_private,
1215}; 1190};
1216#endif 1191#endif
1217 1192
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 9c7f712fc7e9..9db332e7a6c0 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -964,7 +964,7 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo
964 964
965 dev = (struct net_device *)dev_ptr; 965 dev = (struct net_device *)dev_ptr;
966 966
967 if (dev->nd_net != &init_net) 967 if (dev_net(dev) != &init_net)
968 return NOTIFY_DONE; 968 return NOTIFY_DONE;
969 969
970 if (dev->name == NULL || strncmp(dev->name, "lec", 3)) 970 if (dev->name == NULL || strncmp(dev->name, "lec", 3))
diff --git a/net/atm/proc.c b/net/atm/proc.c
index e9693aed7ef8..5c9f3d148135 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -78,7 +78,7 @@ static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l)
78{ 78{
79 struct sock *sk = *sock; 79 struct sock *sk = *sock;
80 80
81 if (sk == (void *)1) { 81 if (sk == SEQ_START_TOKEN) {
82 for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) { 82 for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) {
83 struct hlist_head *head = &vcc_hash[*bucket]; 83 struct hlist_head *head = &vcc_hash[*bucket];
84 84
@@ -98,7 +98,7 @@ try_again:
98 sk = sk_head(&vcc_hash[*bucket]); 98 sk = sk_head(&vcc_hash[*bucket]);
99 goto try_again; 99 goto try_again;
100 } 100 }
101 sk = (void *)1; 101 sk = SEQ_START_TOKEN;
102out: 102out:
103 *sock = sk; 103 *sock = sk;
104 return (l < 0); 104 return (l < 0);
@@ -114,31 +114,13 @@ static int __vcc_seq_open(struct inode *inode, struct file *file,
114 int family, const struct seq_operations *ops) 114 int family, const struct seq_operations *ops)
115{ 115{
116 struct vcc_state *state; 116 struct vcc_state *state;
117 struct seq_file *seq;
118 int rc = -ENOMEM;
119 117
120 state = kmalloc(sizeof(*state), GFP_KERNEL); 118 state = __seq_open_private(file, ops, sizeof(*state));
121 if (!state) 119 if (state == NULL)
122 goto out; 120 return -ENOMEM;
123
124 rc = seq_open(file, ops);
125 if (rc)
126 goto out_kfree;
127 121
128 state->family = family; 122 state->family = family;
129 123 return 0;
130 seq = file->private_data;
131 seq->private = state;
132out:
133 return rc;
134out_kfree:
135 kfree(state);
136 goto out;
137}
138
139static int vcc_seq_release(struct inode *inode, struct file *file)
140{
141 return seq_release_private(inode, file);
142} 124}
143 125
144static void *vcc_seq_start(struct seq_file *seq, loff_t *pos) 126static void *vcc_seq_start(struct seq_file *seq, loff_t *pos)
@@ -148,8 +130,8 @@ static void *vcc_seq_start(struct seq_file *seq, loff_t *pos)
148 loff_t left = *pos; 130 loff_t left = *pos;
149 131
150 read_lock(&vcc_sklist_lock); 132 read_lock(&vcc_sklist_lock);
151 state->sk = (void *)1; 133 state->sk = SEQ_START_TOKEN;
152 return left ? vcc_walk(state, left) : (void *)1; 134 return left ? vcc_walk(state, left) : SEQ_START_TOKEN;
153} 135}
154 136
155static void vcc_seq_stop(struct seq_file *seq, void *v) 137static void vcc_seq_stop(struct seq_file *seq, void *v)
@@ -253,7 +235,7 @@ static int atm_dev_seq_show(struct seq_file *seq, void *v)
253 "Itf Type ESI/\"MAC\"addr " 235 "Itf Type ESI/\"MAC\"addr "
254 "AAL(TX,err,RX,err,drop) ... [refcnt]\n"; 236 "AAL(TX,err,RX,err,drop) ... [refcnt]\n";
255 237
256 if (v == (void *)1) 238 if (v == SEQ_START_TOKEN)
257 seq_puts(seq, atm_dev_banner); 239 seq_puts(seq, atm_dev_banner);
258 else { 240 else {
259 struct atm_dev *dev = list_entry(v, struct atm_dev, dev_list); 241 struct atm_dev *dev = list_entry(v, struct atm_dev, dev_list);
@@ -287,7 +269,7 @@ static int pvc_seq_show(struct seq_file *seq, void *v)
287 static char atm_pvc_banner[] = 269 static char atm_pvc_banner[] =
288 "Itf VPI VCI AAL RX(PCR,Class) TX(PCR,Class)\n"; 270 "Itf VPI VCI AAL RX(PCR,Class) TX(PCR,Class)\n";
289 271
290 if (v == (void *)1) 272 if (v == SEQ_START_TOKEN)
291 seq_puts(seq, atm_pvc_banner); 273 seq_puts(seq, atm_pvc_banner);
292 else { 274 else {
293 struct vcc_state *state = seq->private; 275 struct vcc_state *state = seq->private;
@@ -314,12 +296,12 @@ static const struct file_operations pvc_seq_fops = {
314 .open = pvc_seq_open, 296 .open = pvc_seq_open,
315 .read = seq_read, 297 .read = seq_read,
316 .llseek = seq_lseek, 298 .llseek = seq_lseek,
317 .release = vcc_seq_release, 299 .release = seq_release_private,
318}; 300};
319 301
320static int vcc_seq_show(struct seq_file *seq, void *v) 302static int vcc_seq_show(struct seq_file *seq, void *v)
321{ 303{
322 if (v == (void *)1) { 304 if (v == SEQ_START_TOKEN) {
323 seq_printf(seq, sizeof(void *) == 4 ? "%-8s%s" : "%-16s%s", 305 seq_printf(seq, sizeof(void *) == 4 ? "%-8s%s" : "%-16s%s",
324 "Address ", "Itf VPI VCI Fam Flags Reply " 306 "Address ", "Itf VPI VCI Fam Flags Reply "
325 "Send buffer Recv buffer [refcnt]\n"); 307 "Send buffer Recv buffer [refcnt]\n");
@@ -348,7 +330,7 @@ static const struct file_operations vcc_seq_fops = {
348 .open = vcc_seq_open, 330 .open = vcc_seq_open,
349 .read = seq_read, 331 .read = seq_read,
350 .llseek = seq_lseek, 332 .llseek = seq_lseek,
351 .release = vcc_seq_release, 333 .release = seq_release_private,
352}; 334};
353 335
354static int svc_seq_show(struct seq_file *seq, void *v) 336static int svc_seq_show(struct seq_file *seq, void *v)
@@ -356,7 +338,7 @@ static int svc_seq_show(struct seq_file *seq, void *v)
356 static char atm_svc_banner[] = 338 static char atm_svc_banner[] =
357 "Itf VPI VCI State Remote\n"; 339 "Itf VPI VCI State Remote\n";
358 340
359 if (v == (void *)1) 341 if (v == SEQ_START_TOKEN)
360 seq_puts(seq, atm_svc_banner); 342 seq_puts(seq, atm_svc_banner);
361 else { 343 else {
362 struct vcc_state *state = seq->private; 344 struct vcc_state *state = seq->private;
@@ -383,7 +365,7 @@ static const struct file_operations svc_seq_fops = {
383 .open = svc_seq_open, 365 .open = svc_seq_open,
384 .read = seq_read, 366 .read = seq_read,
385 .llseek = seq_lseek, 367 .llseek = seq_lseek,
386 .release = vcc_seq_release, 368 .release = seq_release_private,
387}; 369};
388 370
389static ssize_t proc_dev_atm_read(struct file *file, char __user *buf, 371static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
diff --git a/net/atm/resources.c b/net/atm/resources.c
index 1bcf6dc8d409..a34ba948af96 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -415,7 +415,7 @@ static __inline__ void *dev_get_idx(loff_t left)
415void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos) 415void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos)
416{ 416{
417 mutex_lock(&atm_dev_mutex); 417 mutex_lock(&atm_dev_mutex);
418 return *pos ? dev_get_idx(*pos) : (void *) 1; 418 return *pos ? dev_get_idx(*pos) : SEQ_START_TOKEN;
419} 419}
420 420
421void atm_dev_seq_stop(struct seq_file *seq, void *v) 421void atm_dev_seq_stop(struct seq_file *seq, void *v)
@@ -426,7 +426,8 @@ void atm_dev_seq_stop(struct seq_file *seq, void *v)
426void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) 426void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
427{ 427{
428 ++*pos; 428 ++*pos;
429 v = (v == (void *)1) ? atm_devs.next : ((struct list_head *)v)->next; 429 v = (v == SEQ_START_TOKEN)
430 ? atm_devs.next : ((struct list_head *)v)->next;
430 return (v == &atm_devs) ? NULL : v; 431 return (v == &atm_devs) ? NULL : v;
431} 432}
432 433
diff --git a/net/atm/svc.c b/net/atm/svc.c
index daf9a48a7db0..de1e4f2f3a43 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -326,7 +326,7 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
326 326
327 lock_sock(sk); 327 lock_sock(sk);
328 328
329 error = svc_create(sk->sk_net, newsock,0); 329 error = svc_create(sock_net(sk), newsock,0);
330 if (error) 330 if (error)
331 goto out; 331 goto out;
332 332
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 48bfcc741f25..2712544cf0ca 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -116,7 +116,7 @@ static int ax25_device_event(struct notifier_block *this, unsigned long event,
116{ 116{
117 struct net_device *dev = (struct net_device *)ptr; 117 struct net_device *dev = (struct net_device *)ptr;
118 118
119 if (dev->nd_net != &init_net) 119 if (dev_net(dev) != &init_net)
120 return NOTIFY_DONE; 120 return NOTIFY_DONE;
121 121
122 /* Reject non AX.25 devices */ 122 /* Reject non AX.25 devices */
@@ -869,7 +869,7 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
869 struct sock *sk; 869 struct sock *sk;
870 ax25_cb *ax25, *oax25; 870 ax25_cb *ax25, *oax25;
871 871
872 sk = sk_alloc(osk->sk_net, PF_AX25, GFP_ATOMIC, osk->sk_prot); 872 sk = sk_alloc(sock_net(osk), PF_AX25, GFP_ATOMIC, osk->sk_prot);
873 if (sk == NULL) 873 if (sk == NULL)
874 return NULL; 874 return NULL;
875 875
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index d1be080dcb25..33790a8efbc8 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
@@ -451,7 +451,7 @@ int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
451 skb->sk = NULL; /* Initially we don't know who it's for */ 451 skb->sk = NULL; /* Initially we don't know who it's for */
452 skb->destructor = NULL; /* Who initializes this, dammit?! */ 452 skb->destructor = NULL; /* Who initializes this, dammit?! */
453 453
454 if (dev->nd_net != &init_net) { 454 if (dev_net(dev) != &init_net) {
455 kfree_skb(skb); 455 kfree_skb(skb);
456 return 0; 456 return 0;
457 } 457 }
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 2957df4b6c0b..a4849f2c1d81 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1499,7 +1499,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
1499 goto response; 1499 goto response;
1500 } 1500 }
1501 1501
1502 sk = l2cap_sock_alloc(parent->sk_net, NULL, BTPROTO_L2CAP, GFP_ATOMIC); 1502 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1503 if (!sk) 1503 if (!sk)
1504 goto response; 1504 goto response;
1505 1505
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index af4e3934ee84..5083adcbfae5 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -868,7 +868,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
868 goto done; 868 goto done;
869 } 869 }
870 870
871 sk = rfcomm_sock_alloc(parent->sk_net, NULL, BTPROTO_RFCOMM, GFP_ATOMIC); 871 sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC);
872 if (!sk) 872 if (!sk)
873 goto done; 873 goto done;
874 874
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index cd887cdca426..b0d487e2db20 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -803,7 +803,7 @@ static void sco_conn_ready(struct sco_conn *conn)
803 803
804 bh_lock_sock(parent); 804 bh_lock_sock(parent);
805 805
806 sk = sco_sock_alloc(parent->sk_net, NULL, BTPROTO_SCO, GFP_ATOMIC); 806 sk = sco_sock_alloc(sock_net(parent), NULL, BTPROTO_SCO, GFP_ATOMIC);
807 if (!sk) { 807 if (!sk) {
808 bh_unlock_sock(parent); 808 bh_unlock_sock(parent);
809 goto done; 809 goto done;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index af7e8be8d8d2..bb90cd7bace3 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -111,7 +111,9 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
111 * require us to fill additional fields. */ 111 * require us to fill additional fields. */
112static struct net_device __fake_net_device = { 112static struct net_device __fake_net_device = {
113 .hard_header_len = ETH_HLEN, 113 .hard_header_len = ETH_HLEN,
114#ifdef CONFIG_NET_NS
114 .nd_net = &init_net, 115 .nd_net = &init_net,
116#endif
115}; 117};
116 118
117static struct rtable __fake_rtable = { 119static struct rtable __fake_rtable = {
@@ -224,8 +226,8 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
224 } 226 }
225 nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING; 227 nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
226 228
227 skb->dst = (struct dst_entry *)&__fake_rtable; 229 skb->rtable = &__fake_rtable;
228 dst_hold(skb->dst); 230 dst_hold(&__fake_rtable.u.dst);
229 231
230 skb->dev = nf_bridge->physindev; 232 skb->dev = nf_bridge->physindev;
231 nf_bridge_push_encap_header(skb); 233 nf_bridge_push_encap_header(skb);
@@ -389,8 +391,8 @@ bridged_dnat:
389 skb->pkt_type = PACKET_HOST; 391 skb->pkt_type = PACKET_HOST;
390 } 392 }
391 } else { 393 } else {
392 skb->dst = (struct dst_entry *)&__fake_rtable; 394 skb->rtable = &__fake_rtable;
393 dst_hold(skb->dst); 395 dst_hold(&__fake_rtable.u.dst);
394 } 396 }
395 397
396 skb->dev = nf_bridge->physindev; 398 skb->dev = nf_bridge->physindev;
@@ -609,9 +611,9 @@ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
609 const struct net_device *out, 611 const struct net_device *out,
610 int (*okfn)(struct sk_buff *)) 612 int (*okfn)(struct sk_buff *))
611{ 613{
612 if (skb->dst == (struct dst_entry *)&__fake_rtable) { 614 if (skb->rtable == &__fake_rtable) {
613 dst_release(skb->dst); 615 dst_release(&__fake_rtable.u.dst);
614 skb->dst = NULL; 616 skb->rtable = NULL;
615 } 617 }
616 618
617 return NF_ACCEPT; 619 return NF_ACCEPT;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index f5d69336d97b..f155e6ce8a21 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -108,7 +108,7 @@ errout:
108 */ 108 */
109static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 109static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
110{ 110{
111 struct net *net = skb->sk->sk_net; 111 struct net *net = sock_net(skb->sk);
112 struct net_device *dev; 112 struct net_device *dev;
113 int idx; 113 int idx;
114 114
@@ -140,7 +140,7 @@ skip:
140 */ 140 */
141static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 141static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
142{ 142{
143 struct net *net = skb->sk->sk_net; 143 struct net *net = sock_net(skb->sk);
144 struct ifinfomsg *ifm; 144 struct ifinfomsg *ifm;
145 struct nlattr *protinfo; 145 struct nlattr *protinfo;
146 struct net_device *dev; 146 struct net_device *dev;
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 07ac3ae68d8f..00644a544e3c 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -37,7 +37,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
37 struct net_bridge_port *p = dev->br_port; 37 struct net_bridge_port *p = dev->br_port;
38 struct net_bridge *br; 38 struct net_bridge *br;
39 39
40 if (dev->nd_net != &init_net) 40 if (dev_net(dev) != &init_net)
41 return NOTIFY_DONE; 41 return NOTIFY_DONE;
42 42
43 /* not a port of a bridge */ 43 /* not a port of a bridge */
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 0edbd2a1c3f3..8deab645ef75 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -142,7 +142,7 @@ int br_stp_rcv(struct sk_buff *skb, struct net_device *dev,
142 struct net_bridge *br; 142 struct net_bridge *br;
143 const unsigned char *buf; 143 const unsigned char *buf;
144 144
145 if (dev->nd_net != &init_net) 145 if (dev_net(dev) != &init_net)
146 goto err; 146 goto err;
147 147
148 if (!p) 148 if (!p)
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 9cf0538d1717..27d6a511c8c1 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -415,21 +415,21 @@ int br_sysfs_addbr(struct net_device *dev)
415 err = sysfs_create_group(brobj, &bridge_group); 415 err = sysfs_create_group(brobj, &bridge_group);
416 if (err) { 416 if (err) {
417 pr_info("%s: can't create group %s/%s\n", 417 pr_info("%s: can't create group %s/%s\n",
418 __FUNCTION__, dev->name, bridge_group.name); 418 __func__, dev->name, bridge_group.name);
419 goto out1; 419 goto out1;
420 } 420 }
421 421
422 err = sysfs_create_bin_file(brobj, &bridge_forward); 422 err = sysfs_create_bin_file(brobj, &bridge_forward);
423 if (err) { 423 if (err) {
424 pr_info("%s: can't create attribute file %s/%s\n", 424 pr_info("%s: can't create attribute file %s/%s\n",
425 __FUNCTION__, dev->name, bridge_forward.attr.name); 425 __func__, dev->name, bridge_forward.attr.name);
426 goto out2; 426 goto out2;
427 } 427 }
428 428
429 br->ifobj = kobject_create_and_add(SYSFS_BRIDGE_PORT_SUBDIR, brobj); 429 br->ifobj = kobject_create_and_add(SYSFS_BRIDGE_PORT_SUBDIR, brobj);
430 if (!br->ifobj) { 430 if (!br->ifobj) {
431 pr_info("%s: can't add kobject (directory) %s/%s\n", 431 pr_info("%s: can't add kobject (directory) %s/%s\n",
432 __FUNCTION__, dev->name, SYSFS_BRIDGE_PORT_SUBDIR); 432 __func__, dev->name, SYSFS_BRIDGE_PORT_SUBDIR);
433 goto out3; 433 goto out3;
434 } 434 }
435 return 0; 435 return 0;
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index 4a3e2bf892c7..7beeefa0f9c0 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -212,4 +212,18 @@ config BRIDGE_EBT_ULOG
212 212
213 To compile it as a module, choose M here. If unsure, say N. 213 To compile it as a module, choose M here. If unsure, say N.
214 214
215config BRIDGE_EBT_NFLOG
216 tristate "ebt: nflog support"
217 depends on BRIDGE_NF_EBTABLES
218 help
219 This option enables the nflog watcher, which allows to LOG
220 messages through the netfilter logging API, which can use
221 either the old LOG target, the old ULOG target or nfnetlink_log
222 as backend.
223
224 This option adds the ulog watcher, that you can use in any rule
225 in any ebtables table.
226
227 To compile it as a module, choose M here. If unsure, say N.
228
215endmenu 229endmenu
diff --git a/net/bridge/netfilter/Makefile b/net/bridge/netfilter/Makefile
index 905087e0d485..83715d73a503 100644
--- a/net/bridge/netfilter/Makefile
+++ b/net/bridge/netfilter/Makefile
@@ -30,3 +30,4 @@ obj-$(CONFIG_BRIDGE_EBT_SNAT) += ebt_snat.o
30# watchers 30# watchers
31obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o 31obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o
32obj-$(CONFIG_BRIDGE_EBT_ULOG) += ebt_ulog.o 32obj-$(CONFIG_BRIDGE_EBT_ULOG) += ebt_ulog.o
33obj-$(CONFIG_BRIDGE_EBT_NFLOG) += ebt_nflog.o
diff --git a/net/bridge/netfilter/ebt_nflog.c b/net/bridge/netfilter/ebt_nflog.c
new file mode 100644
index 000000000000..8e799aa9e560
--- /dev/null
+++ b/net/bridge/netfilter/ebt_nflog.c
@@ -0,0 +1,74 @@
1/*
2 * ebt_nflog
3 *
4 * Author:
5 * Peter Warasin <peter@endian.com>
6 *
7 * February, 2008
8 *
9 * Based on:
10 * xt_NFLOG.c, (C) 2006 by Patrick McHardy <kaber@trash.net>
11 * ebt_ulog.c, (C) 2004 by Bart De Schuymer <bdschuym@pandora.be>
12 *
13 */
14
15#include <linux/module.h>
16#include <linux/spinlock.h>
17#include <linux/netfilter_bridge/ebtables.h>
18#include <linux/netfilter_bridge/ebt_nflog.h>
19#include <net/netfilter/nf_log.h>
20
21static void ebt_nflog(const struct sk_buff *skb,
22 unsigned int hooknr,
23 const struct net_device *in,
24 const struct net_device *out,
25 const void *data, unsigned int datalen)
26{
27 struct ebt_nflog_info *info = (struct ebt_nflog_info *)data;
28 struct nf_loginfo li;
29
30 li.type = NF_LOG_TYPE_ULOG;
31 li.u.ulog.copy_len = info->len;
32 li.u.ulog.group = info->group;
33 li.u.ulog.qthreshold = info->threshold;
34
35 nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li, "%s", info->prefix);
36}
37
38static int ebt_nflog_check(const char *tablename,
39 unsigned int hookmask,
40 const struct ebt_entry *e,
41 void *data, unsigned int datalen)
42{
43 struct ebt_nflog_info *info = (struct ebt_nflog_info *)data;
44
45 if (datalen != EBT_ALIGN(sizeof(struct ebt_nflog_info)))
46 return -EINVAL;
47 if (info->flags & ~EBT_NFLOG_MASK)
48 return -EINVAL;
49 info->prefix[EBT_NFLOG_PREFIX_SIZE - 1] = '\0';
50 return 0;
51}
52
53static struct ebt_watcher nflog __read_mostly = {
54 .name = EBT_NFLOG_WATCHER,
55 .watcher = ebt_nflog,
56 .check = ebt_nflog_check,
57 .me = THIS_MODULE,
58};
59
60static int __init ebt_nflog_init(void)
61{
62 return ebt_register_watcher(&nflog);
63}
64
65static void __exit ebt_nflog_fini(void)
66{
67 ebt_unregister_watcher(&nflog);
68}
69
70module_init(ebt_nflog_init);
71module_exit(ebt_nflog_fini);
72MODULE_LICENSE("GPL");
73MODULE_AUTHOR("Peter Warasin <peter@endian.com>");
74MODULE_DESCRIPTION("ebtables NFLOG netfilter logging module");
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index be6f18681053..246626bb0c87 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -46,7 +46,7 @@ static struct ebt_table broute_table =
46 .name = "broute", 46 .name = "broute",
47 .table = &initial_table, 47 .table = &initial_table,
48 .valid_hooks = 1 << NF_BR_BROUTING, 48 .valid_hooks = 1 << NF_BR_BROUTING,
49 .lock = RW_LOCK_UNLOCKED, 49 .lock = __RW_LOCK_UNLOCKED(broute_table.lock),
50 .check = check, 50 .check = check,
51 .me = THIS_MODULE, 51 .me = THIS_MODULE,
52}; 52};
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index fb810908732f..690bc3ab186c 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -55,7 +55,7 @@ static struct ebt_table frame_filter =
55 .name = "filter", 55 .name = "filter",
56 .table = &initial_table, 56 .table = &initial_table,
57 .valid_hooks = FILTER_VALID_HOOKS, 57 .valid_hooks = FILTER_VALID_HOOKS,
58 .lock = RW_LOCK_UNLOCKED, 58 .lock = __RW_LOCK_UNLOCKED(frame_filter.lock),
59 .check = check, 59 .check = check,
60 .me = THIS_MODULE, 60 .me = THIS_MODULE,
61}; 61};
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index bc712730c54a..5b495fe2d0b6 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -55,7 +55,7 @@ static struct ebt_table frame_nat =
55 .name = "nat", 55 .name = "nat",
56 .table = &initial_table, 56 .table = &initial_table,
57 .valid_hooks = NAT_VALID_HOOKS, 57 .valid_hooks = NAT_VALID_HOOKS,
58 .lock = RW_LOCK_UNLOCKED, 58 .lock = __RW_LOCK_UNLOCKED(frame_nat.lock),
59 .check = check, 59 .check = check,
60 .me = THIS_MODULE, 60 .me = THIS_MODULE,
61}; 61};
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 36b9f22ed83a..2759b76f731c 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -599,7 +599,7 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
599 struct dev_rcv_lists *d; 599 struct dev_rcv_lists *d;
600 int matches; 600 int matches;
601 601
602 if (dev->type != ARPHRD_CAN || dev->nd_net != &init_net) { 602 if (dev->type != ARPHRD_CAN || dev_net(dev) != &init_net) {
603 kfree_skb(skb); 603 kfree_skb(skb);
604 return 0; 604 return 0;
605 } 605 }
@@ -710,7 +710,7 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
710 struct net_device *dev = (struct net_device *)data; 710 struct net_device *dev = (struct net_device *)data;
711 struct dev_rcv_lists *d; 711 struct dev_rcv_lists *d;
712 712
713 if (dev->nd_net != &init_net) 713 if (dev_net(dev) != &init_net)
714 return NOTIFY_DONE; 714 return NOTIFY_DONE;
715 715
716 if (dev->type != ARPHRD_CAN) 716 if (dev->type != ARPHRD_CAN)
diff --git a/net/can/bcm.c b/net/can/bcm.c
index bd4282dae754..74fd2d33aff4 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -43,6 +43,7 @@
43 43
44#include <linux/module.h> 44#include <linux/module.h>
45#include <linux/init.h> 45#include <linux/init.h>
46#include <linux/hrtimer.h>
46#include <linux/list.h> 47#include <linux/list.h>
47#include <linux/proc_fs.h> 48#include <linux/proc_fs.h>
48#include <linux/uio.h> 49#include <linux/uio.h>
@@ -66,7 +67,7 @@
66#define REGMASK(id) ((id & CAN_RTR_FLAG) | ((id & CAN_EFF_FLAG) ? \ 67#define REGMASK(id) ((id & CAN_RTR_FLAG) | ((id & CAN_EFF_FLAG) ? \
67 (CAN_EFF_MASK | CAN_EFF_FLAG) : CAN_SFF_MASK)) 68 (CAN_EFF_MASK | CAN_EFF_FLAG) : CAN_SFF_MASK))
68 69
69#define CAN_BCM_VERSION CAN_VERSION 70#define CAN_BCM_VERSION "20080415"
70static __initdata const char banner[] = KERN_INFO 71static __initdata const char banner[] = KERN_INFO
71 "can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n"; 72 "can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n";
72 73
@@ -85,11 +86,10 @@ struct bcm_op {
85 int ifindex; 86 int ifindex;
86 canid_t can_id; 87 canid_t can_id;
87 int flags; 88 int flags;
88 unsigned long j_ival1, j_ival2, j_lastmsg;
89 unsigned long frames_abs, frames_filtered; 89 unsigned long frames_abs, frames_filtered;
90 struct timer_list timer, thrtimer;
91 struct timeval ival1, ival2; 90 struct timeval ival1, ival2;
92 ktime_t rx_stamp; 91 struct hrtimer timer, thrtimer;
92 ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
93 int rx_ifindex; 93 int rx_ifindex;
94 int count; 94 int count;
95 int nframes; 95 int nframes;
@@ -126,39 +126,6 @@ static inline struct bcm_sock *bcm_sk(const struct sock *sk)
126#define MHSIZ sizeof(struct bcm_msg_head) 126#define MHSIZ sizeof(struct bcm_msg_head)
127 127
128/* 128/*
129 * rounded_tv2jif - calculate jiffies from timeval including optional up
130 * @tv: pointer to timeval
131 *
132 * Description:
133 * Unlike timeval_to_jiffies() provided in include/linux/jiffies.h, this
134 * function is intentionally more relaxed on precise timer ticks to get
135 * exact one jiffy for requested 1000us on a 1000HZ machine.
136 * This code is to be removed when upgrading to kernel hrtimer.
137 *
138 * Return:
139 * calculated jiffies (max: ULONG_MAX)
140 */
141static unsigned long rounded_tv2jif(const struct timeval *tv)
142{
143 unsigned long sec = tv->tv_sec;
144 unsigned long usec = tv->tv_usec;
145 unsigned long jif;
146
147 if (sec > ULONG_MAX / HZ)
148 return ULONG_MAX;
149
150 /* round up to get at least the requested time */
151 usec += 1000000 / HZ - 1;
152
153 jif = usec / (1000000 / HZ);
154
155 if (sec * HZ > ULONG_MAX - jif)
156 return ULONG_MAX;
157
158 return jif + sec * HZ;
159}
160
161/*
162 * procfs functions 129 * procfs functions
163 */ 130 */
164static char *bcm_proc_getifname(int ifindex) 131static char *bcm_proc_getifname(int ifindex)
@@ -208,13 +175,17 @@ static int bcm_read_proc(char *page, char **start, off_t off,
208 len += snprintf(page + len, PAGE_SIZE - len, "[%d]%c ", 175 len += snprintf(page + len, PAGE_SIZE - len, "[%d]%c ",
209 op->nframes, 176 op->nframes,
210 (op->flags & RX_CHECK_DLC)?'d':' '); 177 (op->flags & RX_CHECK_DLC)?'d':' ');
211 if (op->j_ival1) 178 if (op->kt_ival1.tv64)
212 len += snprintf(page + len, PAGE_SIZE - len, 179 len += snprintf(page + len, PAGE_SIZE - len,
213 "timeo=%ld ", op->j_ival1); 180 "timeo=%lld ",
181 (long long)
182 ktime_to_us(op->kt_ival1));
214 183
215 if (op->j_ival2) 184 if (op->kt_ival2.tv64)
216 len += snprintf(page + len, PAGE_SIZE - len, 185 len += snprintf(page + len, PAGE_SIZE - len,
217 "thr=%ld ", op->j_ival2); 186 "thr=%lld ",
187 (long long)
188 ktime_to_us(op->kt_ival2));
218 189
219 len += snprintf(page + len, PAGE_SIZE - len, 190 len += snprintf(page + len, PAGE_SIZE - len,
220 "# recv %ld (%ld) => reduction: ", 191 "# recv %ld (%ld) => reduction: ",
@@ -238,13 +209,14 @@ static int bcm_read_proc(char *page, char **start, off_t off,
238 "tx_op: %03X %s [%d] ", 209 "tx_op: %03X %s [%d] ",
239 op->can_id, bcm_proc_getifname(op->ifindex), 210 op->can_id, bcm_proc_getifname(op->ifindex),
240 op->nframes); 211 op->nframes);
241 if (op->j_ival1)
242 len += snprintf(page + len, PAGE_SIZE - len, "t1=%ld ",
243 op->j_ival1);
244 212
245 if (op->j_ival2) 213 if (op->kt_ival1.tv64)
246 len += snprintf(page + len, PAGE_SIZE - len, "t2=%ld ", 214 len += snprintf(page + len, PAGE_SIZE - len, "t1=%lld ",
247 op->j_ival2); 215 (long long) ktime_to_us(op->kt_ival1));
216
217 if (op->kt_ival2.tv64)
218 len += snprintf(page + len, PAGE_SIZE - len, "t2=%lld ",
219 (long long) ktime_to_us(op->kt_ival2));
248 220
249 len += snprintf(page + len, PAGE_SIZE - len, "# sent %ld\n", 221 len += snprintf(page + len, PAGE_SIZE - len, "# sent %ld\n",
250 op->frames_abs); 222 op->frames_abs);
@@ -371,11 +343,12 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
371/* 343/*
372 * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions 344 * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions
373 */ 345 */
374static void bcm_tx_timeout_handler(unsigned long data) 346static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
375{ 347{
376 struct bcm_op *op = (struct bcm_op *)data; 348 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
349 enum hrtimer_restart ret = HRTIMER_NORESTART;
377 350
378 if (op->j_ival1 && (op->count > 0)) { 351 if (op->kt_ival1.tv64 && (op->count > 0)) {
379 352
380 op->count--; 353 op->count--;
381 if (!op->count && (op->flags & TX_COUNTEVT)) { 354 if (!op->count && (op->flags & TX_COUNTEVT)) {
@@ -394,22 +367,24 @@ static void bcm_tx_timeout_handler(unsigned long data)
394 } 367 }
395 } 368 }
396 369
397 if (op->j_ival1 && (op->count > 0)) { 370 if (op->kt_ival1.tv64 && (op->count > 0)) {
398 371
399 /* send (next) frame */ 372 /* send (next) frame */
400 bcm_can_tx(op); 373 bcm_can_tx(op);
401 mod_timer(&op->timer, jiffies + op->j_ival1); 374 hrtimer_forward(hrtimer, ktime_get(), op->kt_ival1);
375 ret = HRTIMER_RESTART;
402 376
403 } else { 377 } else {
404 if (op->j_ival2) { 378 if (op->kt_ival2.tv64) {
405 379
406 /* send (next) frame */ 380 /* send (next) frame */
407 bcm_can_tx(op); 381 bcm_can_tx(op);
408 mod_timer(&op->timer, jiffies + op->j_ival2); 382 hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
383 ret = HRTIMER_RESTART;
409 } 384 }
410 } 385 }
411 386
412 return; 387 return ret;
413} 388}
414 389
415/* 390/*
@@ -419,8 +394,6 @@ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
419{ 394{
420 struct bcm_msg_head head; 395 struct bcm_msg_head head;
421 396
422 op->j_lastmsg = jiffies;
423
424 /* update statistics */ 397 /* update statistics */
425 op->frames_filtered++; 398 op->frames_filtered++;
426 399
@@ -439,6 +412,12 @@ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
439 bcm_send_to_user(op, &head, data, 1); 412 bcm_send_to_user(op, &head, data, 1);
440} 413}
441 414
415/* TODO: move to linux/hrtimer.h */
416static inline int hrtimer_callback_running(struct hrtimer *timer)
417{
418 return timer->state & HRTIMER_STATE_CALLBACK;
419}
420
442/* 421/*
443 * bcm_rx_update_and_send - process a detected relevant receive content change 422 * bcm_rx_update_and_send - process a detected relevant receive content change
444 * 1. update the last received data 423 * 1. update the last received data
@@ -448,30 +427,44 @@ static void bcm_rx_update_and_send(struct bcm_op *op,
448 struct can_frame *lastdata, 427 struct can_frame *lastdata,
449 struct can_frame *rxdata) 428 struct can_frame *rxdata)
450{ 429{
451 unsigned long nexttx = op->j_lastmsg + op->j_ival2;
452
453 memcpy(lastdata, rxdata, CFSIZ); 430 memcpy(lastdata, rxdata, CFSIZ);
454 431
455 /* mark as used */ 432 /* mark as used */
456 lastdata->can_dlc |= RX_RECV; 433 lastdata->can_dlc |= RX_RECV;
457 434
458 /* throttle bcm_rx_changed ? */ 435 /* throtteling mode inactive OR data update already on the run ? */
459 if ((op->thrtimer.expires) || 436 if (!op->kt_ival2.tv64 || hrtimer_callback_running(&op->thrtimer)) {
460 ((op->j_ival2) && (nexttx > jiffies))) { 437 /* send RX_CHANGED to the user immediately */
461 /* we are already waiting OR we have to start waiting */ 438 bcm_rx_changed(op, rxdata);
439 return;
440 }
462 441
442 if (hrtimer_active(&op->thrtimer)) {
463 /* mark as 'throttled' */ 443 /* mark as 'throttled' */
464 lastdata->can_dlc |= RX_THR; 444 lastdata->can_dlc |= RX_THR;
445 return;
446 }
465 447
466 if (!(op->thrtimer.expires)) { 448 if (!op->kt_lastmsg.tv64) {
467 /* start the timer only the first time */ 449 /* send first RX_CHANGED to the user immediately */
468 mod_timer(&op->thrtimer, nexttx);
469 }
470
471 } else {
472 /* send RX_CHANGED to the user immediately */
473 bcm_rx_changed(op, rxdata); 450 bcm_rx_changed(op, rxdata);
451 op->kt_lastmsg = ktime_get();
452 return;
453 }
454
455 if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
456 ktime_to_us(op->kt_ival2)) {
457 /* mark as 'throttled' and start timer */
458 lastdata->can_dlc |= RX_THR;
459 hrtimer_start(&op->thrtimer,
460 ktime_add(op->kt_lastmsg, op->kt_ival2),
461 HRTIMER_MODE_ABS);
462 return;
474 } 463 }
464
465 /* the gap was that big, that throttling was not needed here */
466 bcm_rx_changed(op, rxdata);
467 op->kt_lastmsg = ktime_get();
475} 468}
476 469
477/* 470/*
@@ -519,16 +512,16 @@ static void bcm_rx_starttimer(struct bcm_op *op)
519 if (op->flags & RX_NO_AUTOTIMER) 512 if (op->flags & RX_NO_AUTOTIMER)
520 return; 513 return;
521 514
522 if (op->j_ival1) 515 if (op->kt_ival1.tv64)
523 mod_timer(&op->timer, jiffies + op->j_ival1); 516 hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
524} 517}
525 518
526/* 519/*
527 * bcm_rx_timeout_handler - when the (cyclic) CAN frame receiption timed out 520 * bcm_rx_timeout_handler - when the (cyclic) CAN frame receiption timed out
528 */ 521 */
529static void bcm_rx_timeout_handler(unsigned long data) 522static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
530{ 523{
531 struct bcm_op *op = (struct bcm_op *)data; 524 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
532 struct bcm_msg_head msg_head; 525 struct bcm_msg_head msg_head;
533 526
534 msg_head.opcode = RX_TIMEOUT; 527 msg_head.opcode = RX_TIMEOUT;
@@ -548,27 +541,27 @@ static void bcm_rx_timeout_handler(unsigned long data)
548 /* clear received can_frames to indicate 'nothing received' */ 541 /* clear received can_frames to indicate 'nothing received' */
549 memset(op->last_frames, 0, op->nframes * CFSIZ); 542 memset(op->last_frames, 0, op->nframes * CFSIZ);
550 } 543 }
544
545 return HRTIMER_NORESTART;
551} 546}
552 547
553/* 548/*
554 * bcm_rx_thr_handler - the time for blocked content updates is over now: 549 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
555 * Check for throttled data and send it to the userspace
556 */ 550 */
557static void bcm_rx_thr_handler(unsigned long data) 551static int bcm_rx_thr_flush(struct bcm_op *op)
558{ 552{
559 struct bcm_op *op = (struct bcm_op *)data; 553 int updated = 0;
560 int i = 0;
561
562 /* mark disabled / consumed timer */
563 op->thrtimer.expires = 0;
564 554
565 if (op->nframes > 1) { 555 if (op->nframes > 1) {
556 int i;
557
566 /* for MUX filter we start at index 1 */ 558 /* for MUX filter we start at index 1 */
567 for (i = 1; i < op->nframes; i++) { 559 for (i = 1; i < op->nframes; i++) {
568 if ((op->last_frames) && 560 if ((op->last_frames) &&
569 (op->last_frames[i].can_dlc & RX_THR)) { 561 (op->last_frames[i].can_dlc & RX_THR)) {
570 op->last_frames[i].can_dlc &= ~RX_THR; 562 op->last_frames[i].can_dlc &= ~RX_THR;
571 bcm_rx_changed(op, &op->last_frames[i]); 563 bcm_rx_changed(op, &op->last_frames[i]);
564 updated++;
572 } 565 }
573 } 566 }
574 567
@@ -577,8 +570,29 @@ static void bcm_rx_thr_handler(unsigned long data)
577 if (op->last_frames && (op->last_frames[0].can_dlc & RX_THR)) { 570 if (op->last_frames && (op->last_frames[0].can_dlc & RX_THR)) {
578 op->last_frames[0].can_dlc &= ~RX_THR; 571 op->last_frames[0].can_dlc &= ~RX_THR;
579 bcm_rx_changed(op, &op->last_frames[0]); 572 bcm_rx_changed(op, &op->last_frames[0]);
573 updated++;
580 } 574 }
581 } 575 }
576
577 return updated;
578}
579
580/*
581 * bcm_rx_thr_handler - the time for blocked content updates is over now:
582 * Check for throttled data and send it to the userspace
583 */
584static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
585{
586 struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
587
588 if (bcm_rx_thr_flush(op)) {
589 hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
590 return HRTIMER_RESTART;
591 } else {
592 /* rearm throttle handling */
593 op->kt_lastmsg = ktime_set(0, 0);
594 return HRTIMER_NORESTART;
595 }
582} 596}
583 597
584/* 598/*
@@ -591,7 +605,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
591 int i; 605 int i;
592 606
593 /* disable timeout */ 607 /* disable timeout */
594 del_timer(&op->timer); 608 hrtimer_cancel(&op->timer);
595 609
596 if (skb->len == sizeof(rxframe)) { 610 if (skb->len == sizeof(rxframe)) {
597 memcpy(&rxframe, skb->data, sizeof(rxframe)); 611 memcpy(&rxframe, skb->data, sizeof(rxframe));
@@ -669,8 +683,8 @@ static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
669 683
670static void bcm_remove_op(struct bcm_op *op) 684static void bcm_remove_op(struct bcm_op *op)
671{ 685{
672 del_timer(&op->timer); 686 hrtimer_cancel(&op->timer);
673 del_timer(&op->thrtimer); 687 hrtimer_cancel(&op->thrtimer);
674 688
675 if ((op->frames) && (op->frames != &op->sframe)) 689 if ((op->frames) && (op->frames != &op->sframe))
676 kfree(op->frames); 690 kfree(op->frames);
@@ -871,11 +885,11 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
871 op->ifindex = ifindex; 885 op->ifindex = ifindex;
872 886
873 /* initialize uninitialized (kzalloc) structure */ 887 /* initialize uninitialized (kzalloc) structure */
874 setup_timer(&op->timer, bcm_tx_timeout_handler, 888 hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
875 (unsigned long)op); 889 op->timer.function = bcm_tx_timeout_handler;
876 890
877 /* currently unused in tx_ops */ 891 /* currently unused in tx_ops */
878 init_timer(&op->thrtimer); 892 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
879 893
880 /* add this bcm_op to the list of the tx_ops */ 894 /* add this bcm_op to the list of the tx_ops */
881 list_add(&op->list, &bo->tx_ops); 895 list_add(&op->list, &bo->tx_ops);
@@ -902,25 +916,27 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
902 op->count = msg_head->count; 916 op->count = msg_head->count;
903 op->ival1 = msg_head->ival1; 917 op->ival1 = msg_head->ival1;
904 op->ival2 = msg_head->ival2; 918 op->ival2 = msg_head->ival2;
905 op->j_ival1 = rounded_tv2jif(&msg_head->ival1); 919 op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
906 op->j_ival2 = rounded_tv2jif(&msg_head->ival2); 920 op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
907 921
908 /* disable an active timer due to zero values? */ 922 /* disable an active timer due to zero values? */
909 if (!op->j_ival1 && !op->j_ival2) 923 if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
910 del_timer(&op->timer); 924 hrtimer_cancel(&op->timer);
911 } 925 }
912 926
913 if ((op->flags & STARTTIMER) && 927 if ((op->flags & STARTTIMER) &&
914 ((op->j_ival1 && op->count) || op->j_ival2)) { 928 ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) {
915 929
916 /* spec: send can_frame when starting timer */ 930 /* spec: send can_frame when starting timer */
917 op->flags |= TX_ANNOUNCE; 931 op->flags |= TX_ANNOUNCE;
918 932
919 if (op->j_ival1 && (op->count > 0)) { 933 if (op->kt_ival1.tv64 && (op->count > 0)) {
920 /* op->count-- is done in bcm_tx_timeout_handler */ 934 /* op->count-- is done in bcm_tx_timeout_handler */
921 mod_timer(&op->timer, jiffies + op->j_ival1); 935 hrtimer_start(&op->timer, op->kt_ival1,
936 HRTIMER_MODE_REL);
922 } else 937 } else
923 mod_timer(&op->timer, jiffies + op->j_ival2); 938 hrtimer_start(&op->timer, op->kt_ival2,
939 HRTIMER_MODE_REL);
924 } 940 }
925 941
926 if (op->flags & TX_ANNOUNCE) 942 if (op->flags & TX_ANNOUNCE)
@@ -1032,15 +1048,11 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1032 op->ifindex = ifindex; 1048 op->ifindex = ifindex;
1033 1049
1034 /* initialize uninitialized (kzalloc) structure */ 1050 /* initialize uninitialized (kzalloc) structure */
1035 setup_timer(&op->timer, bcm_rx_timeout_handler, 1051 hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1036 (unsigned long)op); 1052 op->timer.function = bcm_rx_timeout_handler;
1037 1053
1038 /* init throttle timer for RX_CHANGED */ 1054 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1039 setup_timer(&op->thrtimer, bcm_rx_thr_handler, 1055 op->thrtimer.function = bcm_rx_thr_handler;
1040 (unsigned long)op);
1041
1042 /* mark disabled timer */
1043 op->thrtimer.expires = 0;
1044 1056
1045 /* add this bcm_op to the list of the rx_ops */ 1057 /* add this bcm_op to the list of the rx_ops */
1046 list_add(&op->list, &bo->rx_ops); 1058 list_add(&op->list, &bo->rx_ops);
@@ -1056,8 +1068,8 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1056 if (op->flags & RX_RTR_FRAME) { 1068 if (op->flags & RX_RTR_FRAME) {
1057 1069
1058 /* no timers in RTR-mode */ 1070 /* no timers in RTR-mode */
1059 del_timer(&op->thrtimer); 1071 hrtimer_cancel(&op->thrtimer);
1060 del_timer(&op->timer); 1072 hrtimer_cancel(&op->timer);
1061 1073
1062 /* 1074 /*
1063 * funny feature in RX(!)_SETUP only for RTR-mode: 1075 * funny feature in RX(!)_SETUP only for RTR-mode:
@@ -1074,28 +1086,25 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1074 /* set timer value */ 1086 /* set timer value */
1075 op->ival1 = msg_head->ival1; 1087 op->ival1 = msg_head->ival1;
1076 op->ival2 = msg_head->ival2; 1088 op->ival2 = msg_head->ival2;
1077 op->j_ival1 = rounded_tv2jif(&msg_head->ival1); 1089 op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
1078 op->j_ival2 = rounded_tv2jif(&msg_head->ival2); 1090 op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
1079 1091
1080 /* disable an active timer due to zero value? */ 1092 /* disable an active timer due to zero value? */
1081 if (!op->j_ival1) 1093 if (!op->kt_ival1.tv64)
1082 del_timer(&op->timer); 1094 hrtimer_cancel(&op->timer);
1083
1084 /* free currently blocked msgs ? */
1085 if (op->thrtimer.expires) {
1086 /* send blocked msgs hereafter */
1087 mod_timer(&op->thrtimer, jiffies + 2);
1088 }
1089 1095
1090 /* 1096 /*
1091 * if (op->j_ival2) is zero, no (new) throttling 1097 * In any case cancel the throttle timer, flush
1092 * will happen. For details see functions 1098 * potentially blocked msgs and reset throttle handling
1093 * bcm_rx_update_and_send() and bcm_rx_thr_handler()
1094 */ 1099 */
1100 op->kt_lastmsg = ktime_set(0, 0);
1101 hrtimer_cancel(&op->thrtimer);
1102 bcm_rx_thr_flush(op);
1095 } 1103 }
1096 1104
1097 if ((op->flags & STARTTIMER) && op->j_ival1) 1105 if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
1098 mod_timer(&op->timer, jiffies + op->j_ival1); 1106 hrtimer_start(&op->timer, op->kt_ival1,
1107 HRTIMER_MODE_REL);
1099 } 1108 }
1100 1109
1101 /* now we can register for can_ids, if we added a new bcm_op */ 1110 /* now we can register for can_ids, if we added a new bcm_op */
@@ -1285,7 +1294,7 @@ static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1285 struct bcm_op *op; 1294 struct bcm_op *op;
1286 int notify_enodev = 0; 1295 int notify_enodev = 0;
1287 1296
1288 if (dev->nd_net != &init_net) 1297 if (dev_net(dev) != &init_net)
1289 return NOTIFY_DONE; 1298 return NOTIFY_DONE;
1290 1299
1291 if (dev->type != ARPHRD_CAN) 1300 if (dev->type != ARPHRD_CAN)
diff --git a/net/can/raw.c b/net/can/raw.c
index 94cd7f27c444..ead50c7c0d40 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -210,7 +210,7 @@ static int raw_notifier(struct notifier_block *nb,
210 struct raw_sock *ro = container_of(nb, struct raw_sock, notifier); 210 struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
211 struct sock *sk = &ro->sk; 211 struct sock *sk = &ro->sk;
212 212
213 if (dev->nd_net != &init_net) 213 if (dev_net(dev) != &init_net)
214 return NOTIFY_DONE; 214 return NOTIFY_DONE;
215 215
216 if (dev->type != ARPHRD_CAN) 216 if (dev->type != ARPHRD_CAN)
diff --git a/net/core/dev.c b/net/core/dev.c
index 460e7f99ce3e..e1df1ab3e04a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -216,7 +216,7 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
216/* Device list insertion */ 216/* Device list insertion */
217static int list_netdevice(struct net_device *dev) 217static int list_netdevice(struct net_device *dev)
218{ 218{
219 struct net *net = dev->nd_net; 219 struct net *net = dev_net(dev);
220 220
221 ASSERT_RTNL(); 221 ASSERT_RTNL();
222 222
@@ -852,8 +852,8 @@ int dev_alloc_name(struct net_device *dev, const char *name)
852 struct net *net; 852 struct net *net;
853 int ret; 853 int ret;
854 854
855 BUG_ON(!dev->nd_net); 855 BUG_ON(!dev_net(dev));
856 net = dev->nd_net; 856 net = dev_net(dev);
857 ret = __dev_alloc_name(net, name, buf); 857 ret = __dev_alloc_name(net, name, buf);
858 if (ret >= 0) 858 if (ret >= 0)
859 strlcpy(dev->name, buf, IFNAMSIZ); 859 strlcpy(dev->name, buf, IFNAMSIZ);
@@ -877,9 +877,9 @@ int dev_change_name(struct net_device *dev, char *newname)
877 struct net *net; 877 struct net *net;
878 878
879 ASSERT_RTNL(); 879 ASSERT_RTNL();
880 BUG_ON(!dev->nd_net); 880 BUG_ON(!dev_net(dev));
881 881
882 net = dev->nd_net; 882 net = dev_net(dev);
883 if (dev->flags & IFF_UP) 883 if (dev->flags & IFF_UP)
884 return -EBUSY; 884 return -EBUSY;
885 885
@@ -2615,7 +2615,7 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
2615 2615
2616 if (v == SEQ_START_TOKEN) 2616 if (v == SEQ_START_TOKEN)
2617 seq_puts(seq, "Type Device Function\n"); 2617 seq_puts(seq, "Type Device Function\n");
2618 else { 2618 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
2619 if (pt->type == htons(ETH_P_ALL)) 2619 if (pt->type == htons(ETH_P_ALL))
2620 seq_puts(seq, "ALL "); 2620 seq_puts(seq, "ALL ");
2621 else 2621 else
@@ -2639,7 +2639,8 @@ static const struct seq_operations ptype_seq_ops = {
2639 2639
2640static int ptype_seq_open(struct inode *inode, struct file *file) 2640static int ptype_seq_open(struct inode *inode, struct file *file)
2641{ 2641{
2642 return seq_open(file, &ptype_seq_ops); 2642 return seq_open_net(inode, file, &ptype_seq_ops,
2643 sizeof(struct seq_net_private));
2643} 2644}
2644 2645
2645static const struct file_operations ptype_seq_fops = { 2646static const struct file_operations ptype_seq_fops = {
@@ -2647,7 +2648,7 @@ static const struct file_operations ptype_seq_fops = {
2647 .open = ptype_seq_open, 2648 .open = ptype_seq_open,
2648 .read = seq_read, 2649 .read = seq_read,
2649 .llseek = seq_lseek, 2650 .llseek = seq_lseek,
2650 .release = seq_release, 2651 .release = seq_release_net,
2651}; 2652};
2652 2653
2653 2654
@@ -3688,8 +3689,8 @@ int register_netdevice(struct net_device *dev)
3688 3689
3689 /* When net_device's are persistent, this will be fatal. */ 3690 /* When net_device's are persistent, this will be fatal. */
3690 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 3691 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
3691 BUG_ON(!dev->nd_net); 3692 BUG_ON(!dev_net(dev));
3692 net = dev->nd_net; 3693 net = dev_net(dev);
3693 3694
3694 spin_lock_init(&dev->queue_lock); 3695 spin_lock_init(&dev->queue_lock);
3695 spin_lock_init(&dev->_xmit_lock); 3696 spin_lock_init(&dev->_xmit_lock);
@@ -3995,11 +3996,15 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
3995 3996
3996 BUG_ON(strlen(name) >= sizeof(dev->name)); 3997 BUG_ON(strlen(name) >= sizeof(dev->name));
3997 3998
3998 /* ensure 32-byte alignment of both the device and private area */ 3999 alloc_size = sizeof(struct net_device) +
3999 alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST + 4000 sizeof(struct net_device_subqueue) * (queue_count - 1);
4000 (sizeof(struct net_device_subqueue) * (queue_count - 1))) & 4001 if (sizeof_priv) {
4001 ~NETDEV_ALIGN_CONST; 4002 /* ensure 32-byte alignment of private area */
4002 alloc_size += sizeof_priv + NETDEV_ALIGN_CONST; 4003 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4004 alloc_size += sizeof_priv;
4005 }
4006 /* ensure 32-byte alignment of whole construct */
4007 alloc_size += NETDEV_ALIGN_CONST;
4003 4008
4004 p = kzalloc(alloc_size, GFP_KERNEL); 4009 p = kzalloc(alloc_size, GFP_KERNEL);
4005 if (!p) { 4010 if (!p) {
@@ -4010,7 +4015,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4010 dev = (struct net_device *) 4015 dev = (struct net_device *)
4011 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); 4016 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4012 dev->padded = (char *)dev - (char *)p; 4017 dev->padded = (char *)dev - (char *)p;
4013 dev->nd_net = &init_net; 4018 dev_net_set(dev, &init_net);
4014 4019
4015 if (sizeof_priv) { 4020 if (sizeof_priv) {
4016 dev->priv = ((char *)dev + 4021 dev->priv = ((char *)dev +
@@ -4021,6 +4026,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4021 } 4026 }
4022 4027
4023 dev->egress_subqueue_count = queue_count; 4028 dev->egress_subqueue_count = queue_count;
4029 dev->gso_max_size = GSO_MAX_SIZE;
4024 4030
4025 dev->get_stats = internal_stats; 4031 dev->get_stats = internal_stats;
4026 netpoll_netdev_init(dev); 4032 netpoll_netdev_init(dev);
@@ -4040,6 +4046,8 @@ EXPORT_SYMBOL(alloc_netdev_mq);
4040 */ 4046 */
4041void free_netdev(struct net_device *dev) 4047void free_netdev(struct net_device *dev)
4042{ 4048{
4049 release_net(dev_net(dev));
4050
4043 /* Compatibility with error handling in drivers */ 4051 /* Compatibility with error handling in drivers */
4044 if (dev->reg_state == NETREG_UNINITIALIZED) { 4052 if (dev->reg_state == NETREG_UNINITIALIZED) {
4045 kfree((char *)dev - dev->padded); 4053 kfree((char *)dev - dev->padded);
@@ -4134,7 +4142,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
4134 4142
4135 /* Get out if there is nothing todo */ 4143 /* Get out if there is nothing todo */
4136 err = 0; 4144 err = 0;
4137 if (dev->nd_net == net) 4145 if (net_eq(dev_net(dev), net))
4138 goto out; 4146 goto out;
4139 4147
4140 /* Pick the destination device name, and ensure 4148 /* Pick the destination device name, and ensure
@@ -4185,7 +4193,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
4185 dev_addr_discard(dev); 4193 dev_addr_discard(dev);
4186 4194
4187 /* Actually switch the network namespace */ 4195 /* Actually switch the network namespace */
4188 dev->nd_net = net; 4196 dev_net_set(dev, net);
4189 4197
4190 /* Assign the new device name */ 4198 /* Assign the new device name */
4191 if (destname != dev->name) 4199 if (destname != dev->name)
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index cec582563e0d..f8a3455f4493 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -156,39 +156,14 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
156EXPORT_SYMBOL(dev_mc_unsync); 156EXPORT_SYMBOL(dev_mc_unsync);
157 157
158#ifdef CONFIG_PROC_FS 158#ifdef CONFIG_PROC_FS
159static void *dev_mc_seq_start(struct seq_file *seq, loff_t *pos)
160 __acquires(dev_base_lock)
161{
162 struct net *net = seq_file_net(seq);
163 struct net_device *dev;
164 loff_t off = 0;
165
166 read_lock(&dev_base_lock);
167 for_each_netdev(net, dev) {
168 if (off++ == *pos)
169 return dev;
170 }
171 return NULL;
172}
173
174static void *dev_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
175{
176 ++*pos;
177 return next_net_device((struct net_device *)v);
178}
179
180static void dev_mc_seq_stop(struct seq_file *seq, void *v)
181 __releases(dev_base_lock)
182{
183 read_unlock(&dev_base_lock);
184}
185
186
187static int dev_mc_seq_show(struct seq_file *seq, void *v) 159static int dev_mc_seq_show(struct seq_file *seq, void *v)
188{ 160{
189 struct dev_addr_list *m; 161 struct dev_addr_list *m;
190 struct net_device *dev = v; 162 struct net_device *dev = v;
191 163
164 if (v == SEQ_START_TOKEN)
165 return 0;
166
192 netif_tx_lock_bh(dev); 167 netif_tx_lock_bh(dev);
193 for (m = dev->mc_list; m; m = m->next) { 168 for (m = dev->mc_list; m; m = m->next) {
194 int i; 169 int i;
@@ -206,9 +181,9 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
206} 181}
207 182
208static const struct seq_operations dev_mc_seq_ops = { 183static const struct seq_operations dev_mc_seq_ops = {
209 .start = dev_mc_seq_start, 184 .start = dev_seq_start,
210 .next = dev_mc_seq_next, 185 .next = dev_seq_next,
211 .stop = dev_mc_seq_stop, 186 .stop = dev_seq_stop,
212 .show = dev_mc_seq_show, 187 .show = dev_mc_seq_show,
213}; 188};
214 189
diff --git a/net/core/dst.c b/net/core/dst.c
index 7deef483c79f..fe03266130b6 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -259,6 +259,16 @@ again:
259 return NULL; 259 return NULL;
260} 260}
261 261
262void dst_release(struct dst_entry *dst)
263{
264 if (dst) {
265 WARN_ON(atomic_read(&dst->__refcnt) < 1);
266 smp_mb__before_atomic_dec();
267 atomic_dec(&dst->__refcnt);
268 }
269}
270EXPORT_SYMBOL(dst_release);
271
262/* Dirty hack. We did it in 2.2 (in __dst_free), 272/* Dirty hack. We did it in 2.2 (in __dst_free),
263 * we have _very_ good reasons not to repeat 273 * we have _very_ good reasons not to repeat
264 * this mistake in 2.3, but we have no choice 274 * this mistake in 2.3, but we have no choice
@@ -279,7 +289,7 @@ static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
279 if (!unregister) { 289 if (!unregister) {
280 dst->input = dst->output = dst_discard; 290 dst->input = dst->output = dst_discard;
281 } else { 291 } else {
282 dst->dev = dst->dev->nd_net->loopback_dev; 292 dst->dev = dev_net(dst->dev)->loopback_dev;
283 dev_hold(dst->dev); 293 dev_hold(dst->dev);
284 dev_put(dev); 294 dev_put(dev);
285 if (dst->neighbour && dst->neighbour->dev == dev) { 295 if (dst->neighbour && dst->neighbour->dev == dev) {
@@ -295,9 +305,6 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, void
295 struct net_device *dev = ptr; 305 struct net_device *dev = ptr;
296 struct dst_entry *dst, *last = NULL; 306 struct dst_entry *dst, *last = NULL;
297 307
298 if (dev->nd_net != &init_net)
299 return NOTIFY_DONE;
300
301 switch (event) { 308 switch (event) {
302 case NETDEV_UNREGISTER: 309 case NETDEV_UNREGISTER:
303 case NETDEV_DOWN: 310 case NETDEV_DOWN:
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 1163eb2256d0..a29b43d0b450 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -284,8 +284,10 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
284{ 284{
285 struct ethtool_eeprom eeprom; 285 struct ethtool_eeprom eeprom;
286 const struct ethtool_ops *ops = dev->ethtool_ops; 286 const struct ethtool_ops *ops = dev->ethtool_ops;
287 void __user *userbuf = useraddr + sizeof(eeprom);
288 u32 bytes_remaining;
287 u8 *data; 289 u8 *data;
288 int ret; 290 int ret = 0;
289 291
290 if (!ops->get_eeprom || !ops->get_eeprom_len) 292 if (!ops->get_eeprom || !ops->get_eeprom_len)
291 return -EOPNOTSUPP; 293 return -EOPNOTSUPP;
@@ -301,26 +303,26 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
301 if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) 303 if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
302 return -EINVAL; 304 return -EINVAL;
303 305
304 data = kmalloc(eeprom.len, GFP_USER); 306 data = kmalloc(PAGE_SIZE, GFP_USER);
305 if (!data) 307 if (!data)
306 return -ENOMEM; 308 return -ENOMEM;
307 309
308 ret = -EFAULT; 310 bytes_remaining = eeprom.len;
309 if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) 311 while (bytes_remaining > 0) {
310 goto out; 312 eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE);
311
312 ret = ops->get_eeprom(dev, &eeprom, data);
313 if (ret)
314 goto out;
315 313
316 ret = -EFAULT; 314 ret = ops->get_eeprom(dev, &eeprom, data);
317 if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) 315 if (ret)
318 goto out; 316 break;
319 if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) 317 if (copy_to_user(userbuf, data, eeprom.len)) {
320 goto out; 318 ret = -EFAULT;
321 ret = 0; 319 break;
320 }
321 userbuf += eeprom.len;
322 eeprom.offset += eeprom.len;
323 bytes_remaining -= eeprom.len;
324 }
322 325
323 out:
324 kfree(data); 326 kfree(data);
325 return ret; 327 return ret;
326} 328}
@@ -329,8 +331,10 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
329{ 331{
330 struct ethtool_eeprom eeprom; 332 struct ethtool_eeprom eeprom;
331 const struct ethtool_ops *ops = dev->ethtool_ops; 333 const struct ethtool_ops *ops = dev->ethtool_ops;
334 void __user *userbuf = useraddr + sizeof(eeprom);
335 u32 bytes_remaining;
332 u8 *data; 336 u8 *data;
333 int ret; 337 int ret = 0;
334 338
335 if (!ops->set_eeprom || !ops->get_eeprom_len) 339 if (!ops->set_eeprom || !ops->get_eeprom_len)
336 return -EOPNOTSUPP; 340 return -EOPNOTSUPP;
@@ -346,22 +350,26 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
346 if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) 350 if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
347 return -EINVAL; 351 return -EINVAL;
348 352
349 data = kmalloc(eeprom.len, GFP_USER); 353 data = kmalloc(PAGE_SIZE, GFP_USER);
350 if (!data) 354 if (!data)
351 return -ENOMEM; 355 return -ENOMEM;
352 356
353 ret = -EFAULT; 357 bytes_remaining = eeprom.len;
354 if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) 358 while (bytes_remaining > 0) {
355 goto out; 359 eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE);
356
357 ret = ops->set_eeprom(dev, &eeprom, data);
358 if (ret)
359 goto out;
360 360
361 if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) 361 if (copy_from_user(data, userbuf, eeprom.len)) {
362 ret = -EFAULT; 362 ret = -EFAULT;
363 break;
364 }
365 ret = ops->set_eeprom(dev, &eeprom, data);
366 if (ret)
367 break;
368 userbuf += eeprom.len;
369 eeprom.offset += eeprom.len;
370 bytes_remaining -= eeprom.len;
371 }
363 372
364 out:
365 kfree(data); 373 kfree(data);
366 return ret; 374 return ret;
367} 375}
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 42ccaf5b8509..e3e9ab0f74e3 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -29,7 +29,7 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
29 r->pref = pref; 29 r->pref = pref;
30 r->table = table; 30 r->table = table;
31 r->flags = flags; 31 r->flags = flags;
32 r->fr_net = ops->fro_net; 32 r->fr_net = hold_net(ops->fro_net);
33 33
34 /* The lock is not required here, the list in unreacheable 34 /* The lock is not required here, the list in unreacheable
35 * at the moment this function is called */ 35 * at the moment this function is called */
@@ -214,7 +214,7 @@ errout:
214 214
215static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) 215static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
216{ 216{
217 struct net *net = skb->sk->sk_net; 217 struct net *net = sock_net(skb->sk);
218 struct fib_rule_hdr *frh = nlmsg_data(nlh); 218 struct fib_rule_hdr *frh = nlmsg_data(nlh);
219 struct fib_rules_ops *ops = NULL; 219 struct fib_rules_ops *ops = NULL;
220 struct fib_rule *rule, *r, *last = NULL; 220 struct fib_rule *rule, *r, *last = NULL;
@@ -243,7 +243,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
243 err = -ENOMEM; 243 err = -ENOMEM;
244 goto errout; 244 goto errout;
245 } 245 }
246 rule->fr_net = net; 246 rule->fr_net = hold_net(net);
247 247
248 if (tb[FRA_PRIORITY]) 248 if (tb[FRA_PRIORITY])
249 rule->pref = nla_get_u32(tb[FRA_PRIORITY]); 249 rule->pref = nla_get_u32(tb[FRA_PRIORITY]);
@@ -344,6 +344,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
344 return 0; 344 return 0;
345 345
346errout_free: 346errout_free:
347 release_net(rule->fr_net);
347 kfree(rule); 348 kfree(rule);
348errout: 349errout:
349 rules_ops_put(ops); 350 rules_ops_put(ops);
@@ -352,7 +353,7 @@ errout:
352 353
353static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) 354static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
354{ 355{
355 struct net *net = skb->sk->sk_net; 356 struct net *net = sock_net(skb->sk);
356 struct fib_rule_hdr *frh = nlmsg_data(nlh); 357 struct fib_rule_hdr *frh = nlmsg_data(nlh);
357 struct fib_rules_ops *ops = NULL; 358 struct fib_rules_ops *ops = NULL;
358 struct fib_rule *rule, *tmp; 359 struct fib_rule *rule, *tmp;
@@ -534,7 +535,7 @@ skip:
534 535
535static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) 536static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
536{ 537{
537 struct net *net = skb->sk->sk_net; 538 struct net *net = sock_net(skb->sk);
538 struct fib_rules_ops *ops; 539 struct fib_rules_ops *ops;
539 int idx = 0, family; 540 int idx = 0, family;
540 541
@@ -618,7 +619,7 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
618 void *ptr) 619 void *ptr)
619{ 620{
620 struct net_device *dev = ptr; 621 struct net_device *dev = ptr;
621 struct net *net = dev->nd_net; 622 struct net *net = dev_net(dev);
622 struct fib_rules_ops *ops; 623 struct fib_rules_ops *ops;
623 624
624 ASSERT_RTNL(); 625 ASSERT_RTNL();
diff --git a/net/core/filter.c b/net/core/filter.c
index e0a06942c025..f5f3cf603064 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -27,6 +27,7 @@
27#include <linux/if_packet.h> 27#include <linux/if_packet.h>
28#include <net/ip.h> 28#include <net/ip.h>
29#include <net/protocol.h> 29#include <net/protocol.h>
30#include <net/netlink.h>
30#include <linux/skbuff.h> 31#include <linux/skbuff.h>
31#include <net/sock.h> 32#include <net/sock.h>
32#include <linux/errno.h> 33#include <linux/errno.h>
@@ -64,6 +65,41 @@ static inline void *load_pointer(struct sk_buff *skb, int k,
64} 65}
65 66
66/** 67/**
68 * sk_filter - run a packet through a socket filter
69 * @sk: sock associated with &sk_buff
70 * @skb: buffer to filter
71 * @needlock: set to 1 if the sock is not locked by caller.
72 *
73 * Run the filter code and then cut skb->data to correct size returned by
74 * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
75 * than pkt_len we keep whole skb->data. This is the socket level
76 * wrapper to sk_run_filter. It returns 0 if the packet should
77 * be accepted or -EPERM if the packet should be tossed.
78 *
79 */
80int sk_filter(struct sock *sk, struct sk_buff *skb)
81{
82 int err;
83 struct sk_filter *filter;
84
85 err = security_sock_rcv_skb(sk, skb);
86 if (err)
87 return err;
88
89 rcu_read_lock_bh();
90 filter = rcu_dereference(sk->sk_filter);
91 if (filter) {
92 unsigned int pkt_len = sk_run_filter(skb, filter->insns,
93 filter->len);
94 err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
95 }
96 rcu_read_unlock_bh();
97
98 return err;
99}
100EXPORT_SYMBOL(sk_filter);
101
102/**
67 * sk_run_filter - run a filter on a socket 103 * sk_run_filter - run a filter on a socket
68 * @skb: buffer to run the filter on 104 * @skb: buffer to run the filter on
69 * @filter: filter to apply 105 * @filter: filter to apply
@@ -268,6 +304,22 @@ load_b:
268 case SKF_AD_IFINDEX: 304 case SKF_AD_IFINDEX:
269 A = skb->dev->ifindex; 305 A = skb->dev->ifindex;
270 continue; 306 continue;
307 case SKF_AD_NLATTR: {
308 struct nlattr *nla;
309
310 if (skb_is_nonlinear(skb))
311 return 0;
312 if (A > skb->len - sizeof(struct nlattr))
313 return 0;
314
315 nla = nla_find((struct nlattr *)&skb->data[A],
316 skb->len - A, X);
317 if (nla)
318 A = (void *)nla - (void *)skb->data;
319 else
320 A = 0;
321 continue;
322 }
271 default: 323 default:
272 return 0; 324 return 0;
273 } 325 }
@@ -275,6 +327,7 @@ load_b:
275 327
276 return 0; 328 return 0;
277} 329}
330EXPORT_SYMBOL(sk_run_filter);
278 331
279/** 332/**
280 * sk_chk_filter - verify socket filter code 333 * sk_chk_filter - verify socket filter code
@@ -385,6 +438,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
385 438
386 return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL; 439 return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
387} 440}
441EXPORT_SYMBOL(sk_chk_filter);
388 442
389/** 443/**
390 * sk_filter_rcu_release: Release a socket filter by rcu_head 444 * sk_filter_rcu_release: Release a socket filter by rcu_head
@@ -467,6 +521,3 @@ int sk_detach_filter(struct sock *sk)
467 rcu_read_unlock_bh(); 521 rcu_read_unlock_bh();
468 return ret; 522 return ret;
469} 523}
470
471EXPORT_SYMBOL(sk_chk_filter);
472EXPORT_SYMBOL(sk_run_filter);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 19b8e003f150..75075c303c44 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -123,6 +123,7 @@ unsigned long neigh_rand_reach_time(unsigned long base)
123{ 123{
124 return (base ? (net_random() % base) + (base >> 1) : 0); 124 return (base ? (net_random() % base) + (base >> 1) : 0);
125} 125}
126EXPORT_SYMBOL(neigh_rand_reach_time);
126 127
127 128
128static int neigh_forced_gc(struct neigh_table *tbl) 129static int neigh_forced_gc(struct neigh_table *tbl)
@@ -241,6 +242,7 @@ void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
241 neigh_flush_dev(tbl, dev); 242 neigh_flush_dev(tbl, dev);
242 write_unlock_bh(&tbl->lock); 243 write_unlock_bh(&tbl->lock);
243} 244}
245EXPORT_SYMBOL(neigh_changeaddr);
244 246
245int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) 247int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
246{ 248{
@@ -253,6 +255,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
253 pneigh_queue_purge(&tbl->proxy_queue); 255 pneigh_queue_purge(&tbl->proxy_queue);
254 return 0; 256 return 0;
255} 257}
258EXPORT_SYMBOL(neigh_ifdown);
256 259
257static struct neighbour *neigh_alloc(struct neigh_table *tbl) 260static struct neighbour *neigh_alloc(struct neigh_table *tbl)
258{ 261{
@@ -374,6 +377,7 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
374 read_unlock_bh(&tbl->lock); 377 read_unlock_bh(&tbl->lock);
375 return n; 378 return n;
376} 379}
380EXPORT_SYMBOL(neigh_lookup);
377 381
378struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net, 382struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
379 const void *pkey) 383 const void *pkey)
@@ -388,7 +392,7 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
388 hash_val = tbl->hash(pkey, NULL); 392 hash_val = tbl->hash(pkey, NULL);
389 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) { 393 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
390 if (!memcmp(n->primary_key, pkey, key_len) && 394 if (!memcmp(n->primary_key, pkey, key_len) &&
391 (net == n->dev->nd_net)) { 395 net_eq(dev_net(n->dev), net)) {
392 neigh_hold(n); 396 neigh_hold(n);
393 NEIGH_CACHE_STAT_INC(tbl, hits); 397 NEIGH_CACHE_STAT_INC(tbl, hits);
394 break; 398 break;
@@ -397,6 +401,7 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
397 read_unlock_bh(&tbl->lock); 401 read_unlock_bh(&tbl->lock);
398 return n; 402 return n;
399} 403}
404EXPORT_SYMBOL(neigh_lookup_nodev);
400 405
401struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey, 406struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
402 struct net_device *dev) 407 struct net_device *dev)
@@ -465,28 +470,44 @@ out_neigh_release:
465 neigh_release(n); 470 neigh_release(n);
466 goto out; 471 goto out;
467} 472}
473EXPORT_SYMBOL(neigh_create);
468 474
469struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, 475static u32 pneigh_hash(const void *pkey, int key_len)
470 struct net *net, const void *pkey, struct net_device *dev)
471{ 476{
472 struct pneigh_entry *n;
473 int key_len = tbl->key_len;
474 u32 hash_val = *(u32 *)(pkey + key_len - 4); 477 u32 hash_val = *(u32 *)(pkey + key_len - 4);
475
476 hash_val ^= (hash_val >> 16); 478 hash_val ^= (hash_val >> 16);
477 hash_val ^= hash_val >> 8; 479 hash_val ^= hash_val >> 8;
478 hash_val ^= hash_val >> 4; 480 hash_val ^= hash_val >> 4;
479 hash_val &= PNEIGH_HASHMASK; 481 hash_val &= PNEIGH_HASHMASK;
482 return hash_val;
483}
480 484
481 for (n = tbl->phash_buckets[hash_val]; n; n = n->next) { 485static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
486 struct net *net,
487 const void *pkey,
488 int key_len,
489 struct net_device *dev)
490{
491 while (n) {
482 if (!memcmp(n->key, pkey, key_len) && 492 if (!memcmp(n->key, pkey, key_len) &&
483 (n->net == net) && 493 net_eq(pneigh_net(n), net) &&
484 (n->dev == dev || !n->dev)) 494 (n->dev == dev || !n->dev))
485 break; 495 return n;
496 n = n->next;
486 } 497 }
498 return NULL;
499}
487 500
488 return n; 501struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
502 struct net *net, const void *pkey, struct net_device *dev)
503{
504 int key_len = tbl->key_len;
505 u32 hash_val = pneigh_hash(pkey, key_len);
506
507 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
508 net, pkey, key_len, dev);
489} 509}
510EXPORT_SYMBOL_GPL(__pneigh_lookup);
490 511
491struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, 512struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
492 struct net *net, const void *pkey, 513 struct net *net, const void *pkey,
@@ -494,26 +515,14 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
494{ 515{
495 struct pneigh_entry *n; 516 struct pneigh_entry *n;
496 int key_len = tbl->key_len; 517 int key_len = tbl->key_len;
497 u32 hash_val = *(u32 *)(pkey + key_len - 4); 518 u32 hash_val = pneigh_hash(pkey, key_len);
498
499 hash_val ^= (hash_val >> 16);
500 hash_val ^= hash_val >> 8;
501 hash_val ^= hash_val >> 4;
502 hash_val &= PNEIGH_HASHMASK;
503 519
504 read_lock_bh(&tbl->lock); 520 read_lock_bh(&tbl->lock);
505 521 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
506 for (n = tbl->phash_buckets[hash_val]; n; n = n->next) { 522 net, pkey, key_len, dev);
507 if (!memcmp(n->key, pkey, key_len) &&
508 (n->net == net) &&
509 (n->dev == dev || !n->dev)) {
510 read_unlock_bh(&tbl->lock);
511 goto out;
512 }
513 }
514 read_unlock_bh(&tbl->lock); 523 read_unlock_bh(&tbl->lock);
515 n = NULL; 524
516 if (!creat) 525 if (n || !creat)
517 goto out; 526 goto out;
518 527
519 ASSERT_RTNL(); 528 ASSERT_RTNL();
@@ -522,7 +531,9 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
522 if (!n) 531 if (!n)
523 goto out; 532 goto out;
524 533
534#ifdef CONFIG_NET_NS
525 n->net = hold_net(net); 535 n->net = hold_net(net);
536#endif
526 memcpy(n->key, pkey, key_len); 537 memcpy(n->key, pkey, key_len);
527 n->dev = dev; 538 n->dev = dev;
528 if (dev) 539 if (dev)
@@ -544,6 +555,7 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
544out: 555out:
545 return n; 556 return n;
546} 557}
558EXPORT_SYMBOL(pneigh_lookup);
547 559
548 560
549int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey, 561int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
@@ -551,25 +563,20 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
551{ 563{
552 struct pneigh_entry *n, **np; 564 struct pneigh_entry *n, **np;
553 int key_len = tbl->key_len; 565 int key_len = tbl->key_len;
554 u32 hash_val = *(u32 *)(pkey + key_len - 4); 566 u32 hash_val = pneigh_hash(pkey, key_len);
555
556 hash_val ^= (hash_val >> 16);
557 hash_val ^= hash_val >> 8;
558 hash_val ^= hash_val >> 4;
559 hash_val &= PNEIGH_HASHMASK;
560 567
561 write_lock_bh(&tbl->lock); 568 write_lock_bh(&tbl->lock);
562 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL; 569 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
563 np = &n->next) { 570 np = &n->next) {
564 if (!memcmp(n->key, pkey, key_len) && n->dev == dev && 571 if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
565 (n->net == net)) { 572 net_eq(pneigh_net(n), net)) {
566 *np = n->next; 573 *np = n->next;
567 write_unlock_bh(&tbl->lock); 574 write_unlock_bh(&tbl->lock);
568 if (tbl->pdestructor) 575 if (tbl->pdestructor)
569 tbl->pdestructor(n); 576 tbl->pdestructor(n);
570 if (n->dev) 577 if (n->dev)
571 dev_put(n->dev); 578 dev_put(n->dev);
572 release_net(n->net); 579 release_net(pneigh_net(n));
573 kfree(n); 580 kfree(n);
574 return 0; 581 return 0;
575 } 582 }
@@ -592,7 +599,7 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
592 tbl->pdestructor(n); 599 tbl->pdestructor(n);
593 if (n->dev) 600 if (n->dev)
594 dev_put(n->dev); 601 dev_put(n->dev);
595 release_net(n->net); 602 release_net(pneigh_net(n));
596 kfree(n); 603 kfree(n);
597 continue; 604 continue;
598 } 605 }
@@ -651,6 +658,7 @@ void neigh_destroy(struct neighbour *neigh)
651 atomic_dec(&neigh->tbl->entries); 658 atomic_dec(&neigh->tbl->entries);
652 kmem_cache_free(neigh->tbl->kmem_cachep, neigh); 659 kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
653} 660}
661EXPORT_SYMBOL(neigh_destroy);
654 662
655/* Neighbour state is suspicious; 663/* Neighbour state is suspicious;
656 disable fast path. 664 disable fast path.
@@ -931,6 +939,7 @@ out_unlock_bh:
931 write_unlock_bh(&neigh->lock); 939 write_unlock_bh(&neigh->lock);
932 return rc; 940 return rc;
933} 941}
942EXPORT_SYMBOL(__neigh_event_send);
934 943
935static void neigh_update_hhs(struct neighbour *neigh) 944static void neigh_update_hhs(struct neighbour *neigh)
936{ 945{
@@ -1103,6 +1112,7 @@ out:
1103 1112
1104 return err; 1113 return err;
1105} 1114}
1115EXPORT_SYMBOL(neigh_update);
1106 1116
1107struct neighbour *neigh_event_ns(struct neigh_table *tbl, 1117struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1108 u8 *lladdr, void *saddr, 1118 u8 *lladdr, void *saddr,
@@ -1115,6 +1125,7 @@ struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1115 NEIGH_UPDATE_F_OVERRIDE); 1125 NEIGH_UPDATE_F_OVERRIDE);
1116 return neigh; 1126 return neigh;
1117} 1127}
1128EXPORT_SYMBOL(neigh_event_ns);
1118 1129
1119static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst, 1130static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
1120 __be16 protocol) 1131 __be16 protocol)
@@ -1169,6 +1180,7 @@ int neigh_compat_output(struct sk_buff *skb)
1169 1180
1170 return dev_queue_xmit(skb); 1181 return dev_queue_xmit(skb);
1171} 1182}
1183EXPORT_SYMBOL(neigh_compat_output);
1172 1184
1173/* Slow and careful. */ 1185/* Slow and careful. */
1174 1186
@@ -1214,6 +1226,7 @@ out_kfree_skb:
1214 kfree_skb(skb); 1226 kfree_skb(skb);
1215 goto out; 1227 goto out;
1216} 1228}
1229EXPORT_SYMBOL(neigh_resolve_output);
1217 1230
1218/* As fast as possible without hh cache */ 1231/* As fast as possible without hh cache */
1219 1232
@@ -1238,6 +1251,7 @@ int neigh_connected_output(struct sk_buff *skb)
1238 } 1251 }
1239 return err; 1252 return err;
1240} 1253}
1254EXPORT_SYMBOL(neigh_connected_output);
1241 1255
1242static void neigh_proxy_process(unsigned long arg) 1256static void neigh_proxy_process(unsigned long arg)
1243{ 1257{
@@ -1299,6 +1313,7 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1299 mod_timer(&tbl->proxy_timer, sched_next); 1313 mod_timer(&tbl->proxy_timer, sched_next);
1300 spin_unlock(&tbl->proxy_queue.lock); 1314 spin_unlock(&tbl->proxy_queue.lock);
1301} 1315}
1316EXPORT_SYMBOL(pneigh_enqueue);
1302 1317
1303static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl, 1318static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1304 struct net *net, int ifindex) 1319 struct net *net, int ifindex)
@@ -1306,9 +1321,7 @@ static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1306 struct neigh_parms *p; 1321 struct neigh_parms *p;
1307 1322
1308 for (p = &tbl->parms; p; p = p->next) { 1323 for (p = &tbl->parms; p; p = p->next) {
1309 if (p->net != net) 1324 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1310 continue;
1311 if ((p->dev && p->dev->ifindex == ifindex) ||
1312 (!p->dev && !ifindex)) 1325 (!p->dev && !ifindex))
1313 return p; 1326 return p;
1314 } 1327 }
@@ -1322,7 +1335,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1322 struct neigh_parms *p, *ref; 1335 struct neigh_parms *p, *ref;
1323 struct net *net; 1336 struct net *net;
1324 1337
1325 net = dev->nd_net; 1338 net = dev_net(dev);
1326 ref = lookup_neigh_params(tbl, net, 0); 1339 ref = lookup_neigh_params(tbl, net, 0);
1327 if (!ref) 1340 if (!ref)
1328 return NULL; 1341 return NULL;
@@ -1342,7 +1355,9 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1342 1355
1343 dev_hold(dev); 1356 dev_hold(dev);
1344 p->dev = dev; 1357 p->dev = dev;
1358#ifdef CONFIG_NET_NS
1345 p->net = hold_net(net); 1359 p->net = hold_net(net);
1360#endif
1346 p->sysctl_table = NULL; 1361 p->sysctl_table = NULL;
1347 write_lock_bh(&tbl->lock); 1362 write_lock_bh(&tbl->lock);
1348 p->next = tbl->parms.next; 1363 p->next = tbl->parms.next;
@@ -1351,6 +1366,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1351 } 1366 }
1352 return p; 1367 return p;
1353} 1368}
1369EXPORT_SYMBOL(neigh_parms_alloc);
1354 1370
1355static void neigh_rcu_free_parms(struct rcu_head *head) 1371static void neigh_rcu_free_parms(struct rcu_head *head)
1356{ 1372{
@@ -1381,10 +1397,11 @@ void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1381 write_unlock_bh(&tbl->lock); 1397 write_unlock_bh(&tbl->lock);
1382 NEIGH_PRINTK1("neigh_parms_release: not found\n"); 1398 NEIGH_PRINTK1("neigh_parms_release: not found\n");
1383} 1399}
1400EXPORT_SYMBOL(neigh_parms_release);
1384 1401
1385static void neigh_parms_destroy(struct neigh_parms *parms) 1402static void neigh_parms_destroy(struct neigh_parms *parms)
1386{ 1403{
1387 release_net(parms->net); 1404 release_net(neigh_parms_net(parms));
1388 kfree(parms); 1405 kfree(parms);
1389} 1406}
1390 1407
@@ -1395,7 +1412,9 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
1395 unsigned long now = jiffies; 1412 unsigned long now = jiffies;
1396 unsigned long phsize; 1413 unsigned long phsize;
1397 1414
1415#ifdef CONFIG_NET_NS
1398 tbl->parms.net = &init_net; 1416 tbl->parms.net = &init_net;
1417#endif
1399 atomic_set(&tbl->parms.refcnt, 1); 1418 atomic_set(&tbl->parms.refcnt, 1);
1400 INIT_RCU_HEAD(&tbl->parms.rcu_head); 1419 INIT_RCU_HEAD(&tbl->parms.rcu_head);
1401 tbl->parms.reachable_time = 1420 tbl->parms.reachable_time =
@@ -1441,6 +1460,7 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
1441 tbl->last_flush = now; 1460 tbl->last_flush = now;
1442 tbl->last_rand = now + tbl->parms.reachable_time * 20; 1461 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1443} 1462}
1463EXPORT_SYMBOL(neigh_table_init_no_netlink);
1444 1464
1445void neigh_table_init(struct neigh_table *tbl) 1465void neigh_table_init(struct neigh_table *tbl)
1446{ 1466{
@@ -1462,6 +1482,7 @@ void neigh_table_init(struct neigh_table *tbl)
1462 dump_stack(); 1482 dump_stack();
1463 } 1483 }
1464} 1484}
1485EXPORT_SYMBOL(neigh_table_init);
1465 1486
1466int neigh_table_clear(struct neigh_table *tbl) 1487int neigh_table_clear(struct neigh_table *tbl)
1467{ 1488{
@@ -1499,10 +1520,11 @@ int neigh_table_clear(struct neigh_table *tbl)
1499 1520
1500 return 0; 1521 return 0;
1501} 1522}
1523EXPORT_SYMBOL(neigh_table_clear);
1502 1524
1503static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 1525static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1504{ 1526{
1505 struct net *net = skb->sk->sk_net; 1527 struct net *net = sock_net(skb->sk);
1506 struct ndmsg *ndm; 1528 struct ndmsg *ndm;
1507 struct nlattr *dst_attr; 1529 struct nlattr *dst_attr;
1508 struct neigh_table *tbl; 1530 struct neigh_table *tbl;
@@ -1568,7 +1590,7 @@ out:
1568 1590
1569static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 1591static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1570{ 1592{
1571 struct net *net = skb->sk->sk_net; 1593 struct net *net = sock_net(skb->sk);
1572 struct ndmsg *ndm; 1594 struct ndmsg *ndm;
1573 struct nlattr *tb[NDA_MAX+1]; 1595 struct nlattr *tb[NDA_MAX+1];
1574 struct neigh_table *tbl; 1596 struct neigh_table *tbl;
@@ -1836,7 +1858,7 @@ static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1836 1858
1837static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 1859static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1838{ 1860{
1839 struct net *net = skb->sk->sk_net; 1861 struct net *net = sock_net(skb->sk);
1840 struct neigh_table *tbl; 1862 struct neigh_table *tbl;
1841 struct ndtmsg *ndtmsg; 1863 struct ndtmsg *ndtmsg;
1842 struct nlattr *tb[NDTA_MAX+1]; 1864 struct nlattr *tb[NDTA_MAX+1];
@@ -1961,7 +1983,7 @@ errout:
1961 1983
1962static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) 1984static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1963{ 1985{
1964 struct net *net = skb->sk->sk_net; 1986 struct net *net = sock_net(skb->sk);
1965 int family, tidx, nidx = 0; 1987 int family, tidx, nidx = 0;
1966 int tbl_skip = cb->args[0]; 1988 int tbl_skip = cb->args[0];
1967 int neigh_skip = cb->args[1]; 1989 int neigh_skip = cb->args[1];
@@ -1982,7 +2004,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1982 break; 2004 break;
1983 2005
1984 for (nidx = 0, p = tbl->parms.next; p; p = p->next) { 2006 for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
1985 if (net != p->net) 2007 if (!net_eq(neigh_parms_net(p), net))
1986 continue; 2008 continue;
1987 2009
1988 if (nidx++ < neigh_skip) 2010 if (nidx++ < neigh_skip)
@@ -2061,7 +2083,7 @@ static void neigh_update_notify(struct neighbour *neigh)
2061static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, 2083static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2062 struct netlink_callback *cb) 2084 struct netlink_callback *cb)
2063{ 2085{
2064 struct net * net = skb->sk->sk_net; 2086 struct net * net = sock_net(skb->sk);
2065 struct neighbour *n; 2087 struct neighbour *n;
2066 int rc, h, s_h = cb->args[1]; 2088 int rc, h, s_h = cb->args[1];
2067 int idx, s_idx = idx = cb->args[2]; 2089 int idx, s_idx = idx = cb->args[2];
@@ -2074,7 +2096,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2074 s_idx = 0; 2096 s_idx = 0;
2075 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) { 2097 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
2076 int lidx; 2098 int lidx;
2077 if (n->dev->nd_net != net) 2099 if (dev_net(n->dev) != net)
2078 continue; 2100 continue;
2079 lidx = idx++; 2101 lidx = idx++;
2080 if (lidx < s_idx) 2102 if (lidx < s_idx)
@@ -2169,7 +2191,7 @@ EXPORT_SYMBOL(__neigh_for_each_release);
2169static struct neighbour *neigh_get_first(struct seq_file *seq) 2191static struct neighbour *neigh_get_first(struct seq_file *seq)
2170{ 2192{
2171 struct neigh_seq_state *state = seq->private; 2193 struct neigh_seq_state *state = seq->private;
2172 struct net *net = state->p.net; 2194 struct net *net = seq_file_net(seq);
2173 struct neigh_table *tbl = state->tbl; 2195 struct neigh_table *tbl = state->tbl;
2174 struct neighbour *n = NULL; 2196 struct neighbour *n = NULL;
2175 int bucket = state->bucket; 2197 int bucket = state->bucket;
@@ -2179,7 +2201,7 @@ static struct neighbour *neigh_get_first(struct seq_file *seq)
2179 n = tbl->hash_buckets[bucket]; 2201 n = tbl->hash_buckets[bucket];
2180 2202
2181 while (n) { 2203 while (n) {
2182 if (n->dev->nd_net != net) 2204 if (!net_eq(dev_net(n->dev), net))
2183 goto next; 2205 goto next;
2184 if (state->neigh_sub_iter) { 2206 if (state->neigh_sub_iter) {
2185 loff_t fakep = 0; 2207 loff_t fakep = 0;
@@ -2210,7 +2232,7 @@ static struct neighbour *neigh_get_next(struct seq_file *seq,
2210 loff_t *pos) 2232 loff_t *pos)
2211{ 2233{
2212 struct neigh_seq_state *state = seq->private; 2234 struct neigh_seq_state *state = seq->private;
2213 struct net *net = state->p.net; 2235 struct net *net = seq_file_net(seq);
2214 struct neigh_table *tbl = state->tbl; 2236 struct neigh_table *tbl = state->tbl;
2215 2237
2216 if (state->neigh_sub_iter) { 2238 if (state->neigh_sub_iter) {
@@ -2222,7 +2244,7 @@ static struct neighbour *neigh_get_next(struct seq_file *seq,
2222 2244
2223 while (1) { 2245 while (1) {
2224 while (n) { 2246 while (n) {
2225 if (n->dev->nd_net != net) 2247 if (!net_eq(dev_net(n->dev), net))
2226 goto next; 2248 goto next;
2227 if (state->neigh_sub_iter) { 2249 if (state->neigh_sub_iter) {
2228 void *v = state->neigh_sub_iter(state, n, pos); 2250 void *v = state->neigh_sub_iter(state, n, pos);
@@ -2270,7 +2292,7 @@ static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2270static struct pneigh_entry *pneigh_get_first(struct seq_file *seq) 2292static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2271{ 2293{
2272 struct neigh_seq_state *state = seq->private; 2294 struct neigh_seq_state *state = seq->private;
2273 struct net * net = state->p.net; 2295 struct net *net = seq_file_net(seq);
2274 struct neigh_table *tbl = state->tbl; 2296 struct neigh_table *tbl = state->tbl;
2275 struct pneigh_entry *pn = NULL; 2297 struct pneigh_entry *pn = NULL;
2276 int bucket = state->bucket; 2298 int bucket = state->bucket;
@@ -2278,7 +2300,7 @@ static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2278 state->flags |= NEIGH_SEQ_IS_PNEIGH; 2300 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2279 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) { 2301 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2280 pn = tbl->phash_buckets[bucket]; 2302 pn = tbl->phash_buckets[bucket];
2281 while (pn && (pn->net != net)) 2303 while (pn && !net_eq(pneigh_net(pn), net))
2282 pn = pn->next; 2304 pn = pn->next;
2283 if (pn) 2305 if (pn)
2284 break; 2306 break;
@@ -2293,7 +2315,7 @@ static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2293 loff_t *pos) 2315 loff_t *pos)
2294{ 2316{
2295 struct neigh_seq_state *state = seq->private; 2317 struct neigh_seq_state *state = seq->private;
2296 struct net * net = state->p.net; 2318 struct net *net = seq_file_net(seq);
2297 struct neigh_table *tbl = state->tbl; 2319 struct neigh_table *tbl = state->tbl;
2298 2320
2299 pn = pn->next; 2321 pn = pn->next;
@@ -2301,7 +2323,7 @@ static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2301 if (++state->bucket > PNEIGH_HASHMASK) 2323 if (++state->bucket > PNEIGH_HASHMASK)
2302 break; 2324 break;
2303 pn = tbl->phash_buckets[state->bucket]; 2325 pn = tbl->phash_buckets[state->bucket];
2304 while (pn && (pn->net != net)) 2326 while (pn && !net_eq(pneigh_net(pn), net))
2305 pn = pn->next; 2327 pn = pn->next;
2306 if (pn) 2328 if (pn)
2307 break; 2329 break;
@@ -2506,7 +2528,7 @@ static inline size_t neigh_nlmsg_size(void)
2506 2528
2507static void __neigh_notify(struct neighbour *n, int type, int flags) 2529static void __neigh_notify(struct neighbour *n, int type, int flags)
2508{ 2530{
2509 struct net *net = n->dev->nd_net; 2531 struct net *net = dev_net(n->dev);
2510 struct sk_buff *skb; 2532 struct sk_buff *skb;
2511 int err = -ENOBUFS; 2533 int err = -ENOBUFS;
2512 2534
@@ -2532,6 +2554,7 @@ void neigh_app_ns(struct neighbour *n)
2532{ 2554{
2533 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST); 2555 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2534} 2556}
2557EXPORT_SYMBOL(neigh_app_ns);
2535#endif /* CONFIG_ARPD */ 2558#endif /* CONFIG_ARPD */
2536 2559
2537#ifdef CONFIG_SYSCTL 2560#ifdef CONFIG_SYSCTL
@@ -2763,7 +2786,8 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2763 neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name; 2786 neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
2764 neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id; 2787 neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id;
2765 2788
2766 t->sysctl_header = register_sysctl_paths(neigh_path, t->neigh_vars); 2789 t->sysctl_header =
2790 register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
2767 if (!t->sysctl_header) 2791 if (!t->sysctl_header)
2768 goto free_procname; 2792 goto free_procname;
2769 2793
@@ -2777,6 +2801,7 @@ free:
2777err: 2801err:
2778 return -ENOBUFS; 2802 return -ENOBUFS;
2779} 2803}
2804EXPORT_SYMBOL(neigh_sysctl_register);
2780 2805
2781void neigh_sysctl_unregister(struct neigh_parms *p) 2806void neigh_sysctl_unregister(struct neigh_parms *p)
2782{ 2807{
@@ -2788,6 +2813,7 @@ void neigh_sysctl_unregister(struct neigh_parms *p)
2788 kfree(t); 2813 kfree(t);
2789 } 2814 }
2790} 2815}
2816EXPORT_SYMBOL(neigh_sysctl_unregister);
2791 2817
2792#endif /* CONFIG_SYSCTL */ 2818#endif /* CONFIG_SYSCTL */
2793 2819
@@ -2805,32 +2831,3 @@ static int __init neigh_init(void)
2805 2831
2806subsys_initcall(neigh_init); 2832subsys_initcall(neigh_init);
2807 2833
2808EXPORT_SYMBOL(__neigh_event_send);
2809EXPORT_SYMBOL(neigh_changeaddr);
2810EXPORT_SYMBOL(neigh_compat_output);
2811EXPORT_SYMBOL(neigh_connected_output);
2812EXPORT_SYMBOL(neigh_create);
2813EXPORT_SYMBOL(neigh_destroy);
2814EXPORT_SYMBOL(neigh_event_ns);
2815EXPORT_SYMBOL(neigh_ifdown);
2816EXPORT_SYMBOL(neigh_lookup);
2817EXPORT_SYMBOL(neigh_lookup_nodev);
2818EXPORT_SYMBOL(neigh_parms_alloc);
2819EXPORT_SYMBOL(neigh_parms_release);
2820EXPORT_SYMBOL(neigh_rand_reach_time);
2821EXPORT_SYMBOL(neigh_resolve_output);
2822EXPORT_SYMBOL(neigh_table_clear);
2823EXPORT_SYMBOL(neigh_table_init);
2824EXPORT_SYMBOL(neigh_table_init_no_netlink);
2825EXPORT_SYMBOL(neigh_update);
2826EXPORT_SYMBOL(pneigh_enqueue);
2827EXPORT_SYMBOL(pneigh_lookup);
2828EXPORT_SYMBOL_GPL(__pneigh_lookup);
2829
2830#ifdef CONFIG_ARPD
2831EXPORT_SYMBOL(neigh_app_ns);
2832#endif
2833#ifdef CONFIG_SYSCTL
2834EXPORT_SYMBOL(neigh_sysctl_register);
2835EXPORT_SYMBOL(neigh_sysctl_unregister);
2836#endif
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 7b660834a4c2..72b4c184dd84 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -5,7 +5,9 @@
5#include <linux/list.h> 5#include <linux/list.h>
6#include <linux/delay.h> 6#include <linux/delay.h>
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/idr.h>
8#include <net/net_namespace.h> 9#include <net/net_namespace.h>
10#include <net/netns/generic.h>
9 11
10/* 12/*
11 * Our network namespace constructor/destructor lists 13 * Our network namespace constructor/destructor lists
@@ -20,6 +22,8 @@ LIST_HEAD(net_namespace_list);
20struct net init_net; 22struct net init_net;
21EXPORT_SYMBOL(init_net); 23EXPORT_SYMBOL(init_net);
22 24
25#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
26
23/* 27/*
24 * setup_net runs the initializers for the network namespace object. 28 * setup_net runs the initializers for the network namespace object.
25 */ 29 */
@@ -28,9 +32,22 @@ static __net_init int setup_net(struct net *net)
28 /* Must be called with net_mutex held */ 32 /* Must be called with net_mutex held */
29 struct pernet_operations *ops; 33 struct pernet_operations *ops;
30 int error; 34 int error;
35 struct net_generic *ng;
31 36
32 atomic_set(&net->count, 1); 37 atomic_set(&net->count, 1);
38#ifdef NETNS_REFCNT_DEBUG
33 atomic_set(&net->use_count, 0); 39 atomic_set(&net->use_count, 0);
40#endif
41
42 error = -ENOMEM;
43 ng = kzalloc(sizeof(struct net_generic) +
44 INITIAL_NET_GEN_PTRS * sizeof(void *), GFP_KERNEL);
45 if (ng == NULL)
46 goto out;
47
48 ng->len = INITIAL_NET_GEN_PTRS;
49 INIT_RCU_HEAD(&ng->rcu);
50 rcu_assign_pointer(net->gen, ng);
34 51
35 error = 0; 52 error = 0;
36 list_for_each_entry(ops, &pernet_list, list) { 53 list_for_each_entry(ops, &pernet_list, list) {
@@ -53,6 +70,7 @@ out_undo:
53 } 70 }
54 71
55 rcu_barrier(); 72 rcu_barrier();
73 kfree(ng);
56 goto out; 74 goto out;
57} 75}
58 76
@@ -70,11 +88,13 @@ static void net_free(struct net *net)
70 if (!net) 88 if (!net)
71 return; 89 return;
72 90
91#ifdef NETNS_REFCNT_DEBUG
73 if (unlikely(atomic_read(&net->use_count) != 0)) { 92 if (unlikely(atomic_read(&net->use_count) != 0)) {
74 printk(KERN_EMERG "network namespace not free! Usage: %d\n", 93 printk(KERN_EMERG "network namespace not free! Usage: %d\n",
75 atomic_read(&net->use_count)); 94 atomic_read(&net->use_count));
76 return; 95 return;
77 } 96 }
97#endif
78 98
79 kmem_cache_free(net_cachep, net); 99 kmem_cache_free(net_cachep, net);
80} 100}
@@ -253,6 +273,8 @@ static void unregister_pernet_operations(struct pernet_operations *ops)
253} 273}
254#endif 274#endif
255 275
276static DEFINE_IDA(net_generic_ids);
277
256/** 278/**
257 * register_pernet_subsys - register a network namespace subsystem 279 * register_pernet_subsys - register a network namespace subsystem
258 * @ops: pernet operations structure for the subsystem 280 * @ops: pernet operations structure for the subsystem
@@ -330,6 +352,30 @@ int register_pernet_device(struct pernet_operations *ops)
330} 352}
331EXPORT_SYMBOL_GPL(register_pernet_device); 353EXPORT_SYMBOL_GPL(register_pernet_device);
332 354
355int register_pernet_gen_device(int *id, struct pernet_operations *ops)
356{
357 int error;
358 mutex_lock(&net_mutex);
359again:
360 error = ida_get_new_above(&net_generic_ids, 1, id);
361 if (error) {
362 if (error == -EAGAIN) {
363 ida_pre_get(&net_generic_ids, GFP_KERNEL);
364 goto again;
365 }
366 goto out;
367 }
368 error = register_pernet_operations(&pernet_list, ops);
369 if (error)
370 ida_remove(&net_generic_ids, *id);
371 else if (first_device == &pernet_list)
372 first_device = &ops->list;
373out:
374 mutex_unlock(&net_mutex);
375 return error;
376}
377EXPORT_SYMBOL_GPL(register_pernet_gen_device);
378
333/** 379/**
334 * unregister_pernet_device - unregister a network namespace netdevice 380 * unregister_pernet_device - unregister a network namespace netdevice
335 * @ops: pernet operations structure to manipulate 381 * @ops: pernet operations structure to manipulate
@@ -348,3 +394,61 @@ void unregister_pernet_device(struct pernet_operations *ops)
348 mutex_unlock(&net_mutex); 394 mutex_unlock(&net_mutex);
349} 395}
350EXPORT_SYMBOL_GPL(unregister_pernet_device); 396EXPORT_SYMBOL_GPL(unregister_pernet_device);
397
398void unregister_pernet_gen_device(int id, struct pernet_operations *ops)
399{
400 mutex_lock(&net_mutex);
401 if (&ops->list == first_device)
402 first_device = first_device->next;
403 unregister_pernet_operations(ops);
404 ida_remove(&net_generic_ids, id);
405 mutex_unlock(&net_mutex);
406}
407EXPORT_SYMBOL_GPL(unregister_pernet_gen_device);
408
409static void net_generic_release(struct rcu_head *rcu)
410{
411 struct net_generic *ng;
412
413 ng = container_of(rcu, struct net_generic, rcu);
414 kfree(ng);
415}
416
417int net_assign_generic(struct net *net, int id, void *data)
418{
419 struct net_generic *ng, *old_ng;
420
421 BUG_ON(!mutex_is_locked(&net_mutex));
422 BUG_ON(id == 0);
423
424 ng = old_ng = net->gen;
425 if (old_ng->len >= id)
426 goto assign;
427
428 ng = kzalloc(sizeof(struct net_generic) +
429 id * sizeof(void *), GFP_KERNEL);
430 if (ng == NULL)
431 return -ENOMEM;
432
433 /*
434 * Some synchronisation notes:
435 *
436 * The net_generic explores the net->gen array inside rcu
437 * read section. Besides once set the net->gen->ptr[x]
438 * pointer never changes (see rules in netns/generic.h).
439 *
440 * That said, we simply duplicate this array and schedule
441 * the old copy for kfree after a grace period.
442 */
443
444 ng->len = id;
445 INIT_RCU_HEAD(&ng->rcu);
446 memcpy(&ng->ptr, &old_ng->ptr, old_ng->len);
447
448 rcu_assign_pointer(net->gen, ng);
449 call_rcu(&old_ng->rcu, net_generic_release);
450assign:
451 ng->ptr[id - 1] = data;
452 return 0;
453}
454EXPORT_SYMBOL_GPL(net_assign_generic);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c635de52526c..b04d643fc3c7 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -390,9 +390,7 @@ static void arp_reply(struct sk_buff *skb)
390 if (skb->dev->flags & IFF_NOARP) 390 if (skb->dev->flags & IFF_NOARP)
391 return; 391 return;
392 392
393 if (!pskb_may_pull(skb, (sizeof(struct arphdr) + 393 if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
394 (2 * skb->dev->addr_len) +
395 (2 * sizeof(u32)))))
396 return; 394 return;
397 395
398 skb_reset_network_header(skb); 396 skb_reset_network_header(skb);
@@ -420,7 +418,7 @@ static void arp_reply(struct sk_buff *skb)
420 ipv4_is_loopback(tip) || ipv4_is_multicast(tip)) 418 ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
421 return; 419 return;
422 420
423 size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4); 421 size = arp_hdr_len(skb->dev);
424 send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev), 422 send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
425 LL_RESERVED_SPACE(np->dev)); 423 LL_RESERVED_SPACE(np->dev));
426 424
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 20e63b302ba6..a803b442234c 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1874,7 +1874,7 @@ static int pktgen_device_event(struct notifier_block *unused,
1874{ 1874{
1875 struct net_device *dev = ptr; 1875 struct net_device *dev = ptr;
1876 1876
1877 if (dev->nd_net != &init_net) 1877 if (dev_net(dev) != &init_net)
1878 return NOTIFY_DONE; 1878 return NOTIFY_DONE;
1879 1879
1880 /* It is OK that we do not hold the group lock right now, 1880 /* It is OK that we do not hold the group lock right now,
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 2bd9c5f7627d..bc39e417694a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -269,6 +269,26 @@ int rtnl_link_register(struct rtnl_link_ops *ops)
269 269
270EXPORT_SYMBOL_GPL(rtnl_link_register); 270EXPORT_SYMBOL_GPL(rtnl_link_register);
271 271
272static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
273{
274 struct net_device *dev;
275restart:
276 for_each_netdev(net, dev) {
277 if (dev->rtnl_link_ops == ops) {
278 ops->dellink(dev);
279 goto restart;
280 }
281 }
282}
283
284void rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
285{
286 rtnl_lock();
287 __rtnl_kill_links(net, ops);
288 rtnl_unlock();
289}
290EXPORT_SYMBOL_GPL(rtnl_kill_links);
291
272/** 292/**
273 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink. 293 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
274 * @ops: struct rtnl_link_ops * to unregister 294 * @ops: struct rtnl_link_ops * to unregister
@@ -277,17 +297,10 @@ EXPORT_SYMBOL_GPL(rtnl_link_register);
277 */ 297 */
278void __rtnl_link_unregister(struct rtnl_link_ops *ops) 298void __rtnl_link_unregister(struct rtnl_link_ops *ops)
279{ 299{
280 struct net_device *dev, *n;
281 struct net *net; 300 struct net *net;
282 301
283 for_each_net(net) { 302 for_each_net(net) {
284restart: 303 __rtnl_kill_links(net, ops);
285 for_each_netdev_safe(net, dev, n) {
286 if (dev->rtnl_link_ops == ops) {
287 ops->dellink(dev);
288 goto restart;
289 }
290 }
291 } 304 }
292 list_del(&ops->list); 305 list_del(&ops->list);
293} 306}
@@ -662,7 +675,7 @@ nla_put_failure:
662 675
663static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 676static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
664{ 677{
665 struct net *net = skb->sk->sk_net; 678 struct net *net = sock_net(skb->sk);
666 int idx; 679 int idx;
667 int s_idx = cb->args[0]; 680 int s_idx = cb->args[0];
668 struct net_device *dev; 681 struct net_device *dev;
@@ -879,7 +892,7 @@ errout:
879 892
880static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 893static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
881{ 894{
882 struct net *net = skb->sk->sk_net; 895 struct net *net = sock_net(skb->sk);
883 struct ifinfomsg *ifm; 896 struct ifinfomsg *ifm;
884 struct net_device *dev; 897 struct net_device *dev;
885 int err; 898 int err;
@@ -921,7 +934,7 @@ errout:
921 934
922static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 935static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
923{ 936{
924 struct net *net = skb->sk->sk_net; 937 struct net *net = sock_net(skb->sk);
925 const struct rtnl_link_ops *ops; 938 const struct rtnl_link_ops *ops;
926 struct net_device *dev; 939 struct net_device *dev;
927 struct ifinfomsg *ifm; 940 struct ifinfomsg *ifm;
@@ -972,7 +985,7 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname,
972 goto err_free; 985 goto err_free;
973 } 986 }
974 987
975 dev->nd_net = net; 988 dev_net_set(dev, net);
976 dev->rtnl_link_ops = ops; 989 dev->rtnl_link_ops = ops;
977 990
978 if (tb[IFLA_MTU]) 991 if (tb[IFLA_MTU])
@@ -1000,7 +1013,7 @@ err:
1000 1013
1001static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 1014static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1002{ 1015{
1003 struct net *net = skb->sk->sk_net; 1016 struct net *net = sock_net(skb->sk);
1004 const struct rtnl_link_ops *ops; 1017 const struct rtnl_link_ops *ops;
1005 struct net_device *dev; 1018 struct net_device *dev;
1006 struct ifinfomsg *ifm; 1019 struct ifinfomsg *ifm;
@@ -1132,7 +1145,7 @@ replay:
1132 1145
1133static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) 1146static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
1134{ 1147{
1135 struct net *net = skb->sk->sk_net; 1148 struct net *net = sock_net(skb->sk);
1136 struct ifinfomsg *ifm; 1149 struct ifinfomsg *ifm;
1137 struct nlattr *tb[IFLA_MAX+1]; 1150 struct nlattr *tb[IFLA_MAX+1];
1138 struct net_device *dev = NULL; 1151 struct net_device *dev = NULL;
@@ -1198,7 +1211,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
1198 1211
1199void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change) 1212void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
1200{ 1213{
1201 struct net *net = dev->nd_net; 1214 struct net *net = dev_net(dev);
1202 struct sk_buff *skb; 1215 struct sk_buff *skb;
1203 int err = -ENOBUFS; 1216 int err = -ENOBUFS;
1204 1217
@@ -1227,7 +1240,7 @@ static int rtattr_max;
1227 1240
1228static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 1241static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1229{ 1242{
1230 struct net *net = skb->sk->sk_net; 1243 struct net *net = sock_net(skb->sk);
1231 rtnl_doit_func doit; 1244 rtnl_doit_func doit;
1232 int sz_idx, kind; 1245 int sz_idx, kind;
1233 int min_len; 1246 int min_len;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 608701339620..4fe605fa6f8a 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -263,6 +263,28 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
263 return skb; 263 return skb;
264} 264}
265 265
266/**
267 * dev_alloc_skb - allocate an skbuff for receiving
268 * @length: length to allocate
269 *
270 * Allocate a new &sk_buff and assign it a usage count of one. The
271 * buffer has unspecified headroom built in. Users should allocate
272 * the headroom they think they need without accounting for the
273 * built in space. The built in space is used for optimisations.
274 *
275 * %NULL is returned if there is no free memory. Although this function
276 * allocates memory it can be called from an interrupt.
277 */
278struct sk_buff *dev_alloc_skb(unsigned int length)
279{
280 /*
281 * There is more code here than it seems:
282 * __dev_alloc_skb is an inline
283 */
284 return __dev_alloc_skb(length, GFP_ATOMIC);
285}
286EXPORT_SYMBOL(dev_alloc_skb);
287
266static void skb_drop_list(struct sk_buff **listp) 288static void skb_drop_list(struct sk_buff **listp)
267{ 289{
268 struct sk_buff *list = *listp; 290 struct sk_buff *list = *listp;
@@ -857,6 +879,78 @@ free_skb:
857 return err; 879 return err;
858} 880}
859 881
882/**
883 * skb_put - add data to a buffer
884 * @skb: buffer to use
885 * @len: amount of data to add
886 *
887 * This function extends the used data area of the buffer. If this would
888 * exceed the total buffer size the kernel will panic. A pointer to the
889 * first byte of the extra data is returned.
890 */
891unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
892{
893 unsigned char *tmp = skb_tail_pointer(skb);
894 SKB_LINEAR_ASSERT(skb);
895 skb->tail += len;
896 skb->len += len;
897 if (unlikely(skb->tail > skb->end))
898 skb_over_panic(skb, len, __builtin_return_address(0));
899 return tmp;
900}
901EXPORT_SYMBOL(skb_put);
902
903/**
904 * skb_push - add data to the start of a buffer
905 * @skb: buffer to use
906 * @len: amount of data to add
907 *
908 * This function extends the used data area of the buffer at the buffer
909 * start. If this would exceed the total buffer headroom the kernel will
910 * panic. A pointer to the first byte of the extra data is returned.
911 */
912unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
913{
914 skb->data -= len;
915 skb->len += len;
916 if (unlikely(skb->data<skb->head))
917 skb_under_panic(skb, len, __builtin_return_address(0));
918 return skb->data;
919}
920EXPORT_SYMBOL(skb_push);
921
922/**
923 * skb_pull - remove data from the start of a buffer
924 * @skb: buffer to use
925 * @len: amount of data to remove
926 *
927 * This function removes data from the start of a buffer, returning
928 * the memory to the headroom. A pointer to the next data in the buffer
929 * is returned. Once the data has been pulled future pushes will overwrite
930 * the old data.
931 */
932unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
933{
934 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
935}
936EXPORT_SYMBOL(skb_pull);
937
938/**
939 * skb_trim - remove end from a buffer
940 * @skb: buffer to alter
941 * @len: new length
942 *
943 * Cut the length of a buffer down by removing data from the tail. If
944 * the buffer is already under the length specified it is not modified.
945 * The skb must be linear.
946 */
947void skb_trim(struct sk_buff *skb, unsigned int len)
948{
949 if (skb->len > len)
950 __skb_trim(skb, len);
951}
952EXPORT_SYMBOL(skb_trim);
953
860/* Trims skb to length len. It can change skb pointers. 954/* Trims skb to length len. It can change skb pointers.
861 */ 955 */
862 956
@@ -1766,7 +1860,7 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head
1766 unsigned long flags; 1860 unsigned long flags;
1767 1861
1768 spin_lock_irqsave(&list->lock, flags); 1862 spin_lock_irqsave(&list->lock, flags);
1769 __skb_append(old, newsk, list); 1863 __skb_queue_after(list, old, newsk);
1770 spin_unlock_irqrestore(&list->lock, flags); 1864 spin_unlock_irqrestore(&list->lock, flags);
1771} 1865}
1772 1866
diff --git a/net/core/sock.c b/net/core/sock.c
index 7a0567b4b2c9..54c836a2216b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -372,7 +372,7 @@ static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
372{ 372{
373 int ret = -ENOPROTOOPT; 373 int ret = -ENOPROTOOPT;
374#ifdef CONFIG_NETDEVICES 374#ifdef CONFIG_NETDEVICES
375 struct net *net = sk->sk_net; 375 struct net *net = sock_net(sk);
376 char devname[IFNAMSIZ]; 376 char devname[IFNAMSIZ];
377 int index; 377 int index;
378 378
@@ -958,7 +958,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
958 */ 958 */
959 sk->sk_prot = sk->sk_prot_creator = prot; 959 sk->sk_prot = sk->sk_prot_creator = prot;
960 sock_lock_init(sk); 960 sock_lock_init(sk);
961 sk->sk_net = get_net(net); 961 sock_net_set(sk, get_net(net));
962 } 962 }
963 963
964 return sk; 964 return sk;
@@ -981,12 +981,32 @@ void sk_free(struct sock *sk)
981 981
982 if (atomic_read(&sk->sk_omem_alloc)) 982 if (atomic_read(&sk->sk_omem_alloc))
983 printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n", 983 printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
984 __FUNCTION__, atomic_read(&sk->sk_omem_alloc)); 984 __func__, atomic_read(&sk->sk_omem_alloc));
985 985
986 put_net(sk->sk_net); 986 put_net(sock_net(sk));
987 sk_prot_free(sk->sk_prot_creator, sk); 987 sk_prot_free(sk->sk_prot_creator, sk);
988} 988}
989 989
990/*
991 * Last sock_put should drop referrence to sk->sk_net. It has already
992 * been dropped in sk_change_net. Taking referrence to stopping namespace
993 * is not an option.
994 * Take referrence to a socket to remove it from hash _alive_ and after that
995 * destroy it in the context of init_net.
996 */
997void sk_release_kernel(struct sock *sk)
998{
999 if (sk == NULL || sk->sk_socket == NULL)
1000 return;
1001
1002 sock_hold(sk);
1003 sock_release(sk->sk_socket);
1004 release_net(sock_net(sk));
1005 sock_net_set(sk, get_net(&init_net));
1006 sock_put(sk);
1007}
1008EXPORT_SYMBOL(sk_release_kernel);
1009
990struct sock *sk_clone(const struct sock *sk, const gfp_t priority) 1010struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
991{ 1011{
992 struct sock *newsk; 1012 struct sock *newsk;
@@ -998,7 +1018,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
998 sock_copy(newsk, sk); 1018 sock_copy(newsk, sk);
999 1019
1000 /* SANITY */ 1020 /* SANITY */
1001 get_net(newsk->sk_net); 1021 get_net(sock_net(newsk));
1002 sk_node_init(&newsk->sk_node); 1022 sk_node_init(&newsk->sk_node);
1003 sock_lock_init(newsk); 1023 sock_lock_init(newsk);
1004 bh_lock_sock(newsk); 1024 bh_lock_sock(newsk);
@@ -1076,10 +1096,12 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1076 if (sk->sk_route_caps & NETIF_F_GSO) 1096 if (sk->sk_route_caps & NETIF_F_GSO)
1077 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; 1097 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1078 if (sk_can_gso(sk)) { 1098 if (sk_can_gso(sk)) {
1079 if (dst->header_len) 1099 if (dst->header_len) {
1080 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 1100 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1081 else 1101 } else {
1082 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; 1102 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1103 sk->sk_gso_max_size = dst->dev->gso_max_size;
1104 }
1083 } 1105 }
1084} 1106}
1085EXPORT_SYMBOL_GPL(sk_setup_caps); 1107EXPORT_SYMBOL_GPL(sk_setup_caps);
@@ -1919,16 +1941,113 @@ EXPORT_SYMBOL(sk_common_release);
1919static DEFINE_RWLOCK(proto_list_lock); 1941static DEFINE_RWLOCK(proto_list_lock);
1920static LIST_HEAD(proto_list); 1942static LIST_HEAD(proto_list);
1921 1943
1944#ifdef CONFIG_PROC_FS
1945#define PROTO_INUSE_NR 64 /* should be enough for the first time */
1946struct prot_inuse {
1947 int val[PROTO_INUSE_NR];
1948};
1949
1950static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
1951
1952#ifdef CONFIG_NET_NS
1953void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
1954{
1955 int cpu = smp_processor_id();
1956 per_cpu_ptr(net->core.inuse, cpu)->val[prot->inuse_idx] += val;
1957}
1958EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
1959
1960int sock_prot_inuse_get(struct net *net, struct proto *prot)
1961{
1962 int cpu, idx = prot->inuse_idx;
1963 int res = 0;
1964
1965 for_each_possible_cpu(cpu)
1966 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
1967
1968 return res >= 0 ? res : 0;
1969}
1970EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
1971
1972static int sock_inuse_init_net(struct net *net)
1973{
1974 net->core.inuse = alloc_percpu(struct prot_inuse);
1975 return net->core.inuse ? 0 : -ENOMEM;
1976}
1977
1978static void sock_inuse_exit_net(struct net *net)
1979{
1980 free_percpu(net->core.inuse);
1981}
1982
1983static struct pernet_operations net_inuse_ops = {
1984 .init = sock_inuse_init_net,
1985 .exit = sock_inuse_exit_net,
1986};
1987
1988static __init int net_inuse_init(void)
1989{
1990 if (register_pernet_subsys(&net_inuse_ops))
1991 panic("Cannot initialize net inuse counters");
1992
1993 return 0;
1994}
1995
1996core_initcall(net_inuse_init);
1997#else
1998static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
1999
2000void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2001{
2002 __get_cpu_var(prot_inuse).val[prot->inuse_idx] += val;
2003}
2004EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2005
2006int sock_prot_inuse_get(struct net *net, struct proto *prot)
2007{
2008 int cpu, idx = prot->inuse_idx;
2009 int res = 0;
2010
2011 for_each_possible_cpu(cpu)
2012 res += per_cpu(prot_inuse, cpu).val[idx];
2013
2014 return res >= 0 ? res : 0;
2015}
2016EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2017#endif
2018
2019static void assign_proto_idx(struct proto *prot)
2020{
2021 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2022
2023 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2024 printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
2025 return;
2026 }
2027
2028 set_bit(prot->inuse_idx, proto_inuse_idx);
2029}
2030
2031static void release_proto_idx(struct proto *prot)
2032{
2033 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2034 clear_bit(prot->inuse_idx, proto_inuse_idx);
2035}
2036#else
2037static inline void assign_proto_idx(struct proto *prot)
2038{
2039}
2040
2041static inline void release_proto_idx(struct proto *prot)
2042{
2043}
2044#endif
2045
1922int proto_register(struct proto *prot, int alloc_slab) 2046int proto_register(struct proto *prot, int alloc_slab)
1923{ 2047{
1924 char *request_sock_slab_name = NULL; 2048 char *request_sock_slab_name = NULL;
1925 char *timewait_sock_slab_name; 2049 char *timewait_sock_slab_name;
1926 2050
1927 if (sock_prot_inuse_init(prot) != 0) {
1928 printk(KERN_CRIT "%s: Can't alloc inuse counters!\n", prot->name);
1929 goto out;
1930 }
1931
1932 if (alloc_slab) { 2051 if (alloc_slab) {
1933 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0, 2052 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
1934 SLAB_HWCACHE_ALIGN, NULL); 2053 SLAB_HWCACHE_ALIGN, NULL);
@@ -1936,7 +2055,7 @@ int proto_register(struct proto *prot, int alloc_slab)
1936 if (prot->slab == NULL) { 2055 if (prot->slab == NULL) {
1937 printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n", 2056 printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
1938 prot->name); 2057 prot->name);
1939 goto out_free_inuse; 2058 goto out;
1940 } 2059 }
1941 2060
1942 if (prot->rsk_prot != NULL) { 2061 if (prot->rsk_prot != NULL) {
@@ -1979,6 +2098,7 @@ int proto_register(struct proto *prot, int alloc_slab)
1979 2098
1980 write_lock(&proto_list_lock); 2099 write_lock(&proto_list_lock);
1981 list_add(&prot->node, &proto_list); 2100 list_add(&prot->node, &proto_list);
2101 assign_proto_idx(prot);
1982 write_unlock(&proto_list_lock); 2102 write_unlock(&proto_list_lock);
1983 return 0; 2103 return 0;
1984 2104
@@ -1994,8 +2114,6 @@ out_free_request_sock_slab_name:
1994out_free_sock_slab: 2114out_free_sock_slab:
1995 kmem_cache_destroy(prot->slab); 2115 kmem_cache_destroy(prot->slab);
1996 prot->slab = NULL; 2116 prot->slab = NULL;
1997out_free_inuse:
1998 sock_prot_inuse_free(prot);
1999out: 2117out:
2000 return -ENOBUFS; 2118 return -ENOBUFS;
2001} 2119}
@@ -2005,11 +2123,10 @@ EXPORT_SYMBOL(proto_register);
2005void proto_unregister(struct proto *prot) 2123void proto_unregister(struct proto *prot)
2006{ 2124{
2007 write_lock(&proto_list_lock); 2125 write_lock(&proto_list_lock);
2126 release_proto_idx(prot);
2008 list_del(&prot->node); 2127 list_del(&prot->node);
2009 write_unlock(&proto_list_lock); 2128 write_unlock(&proto_list_lock);
2010 2129
2011 sock_prot_inuse_free(prot);
2012
2013 if (prot->slab != NULL) { 2130 if (prot->slab != NULL) {
2014 kmem_cache_destroy(prot->slab); 2131 kmem_cache_destroy(prot->slab);
2015 prot->slab = NULL; 2132 prot->slab = NULL;
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 130338f83ae5..5fc801057244 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -127,7 +127,7 @@ static struct ctl_table net_core_table[] = {
127 { 127 {
128 .ctl_name = NET_CORE_SOMAXCONN, 128 .ctl_name = NET_CORE_SOMAXCONN,
129 .procname = "somaxconn", 129 .procname = "somaxconn",
130 .data = &init_net.sysctl_somaxconn, 130 .data = &init_net.core.sysctl_somaxconn,
131 .maxlen = sizeof(int), 131 .maxlen = sizeof(int),
132 .mode = 0644, 132 .mode = 0644,
133 .proc_handler = &proc_dointvec 133 .proc_handler = &proc_dointvec
@@ -161,7 +161,7 @@ static __net_init int sysctl_core_net_init(struct net *net)
161{ 161{
162 struct ctl_table *tbl, *tmp; 162 struct ctl_table *tbl, *tmp;
163 163
164 net->sysctl_somaxconn = SOMAXCONN; 164 net->core.sysctl_somaxconn = SOMAXCONN;
165 165
166 tbl = net_core_table; 166 tbl = net_core_table;
167 if (net != &init_net) { 167 if (net != &init_net) {
@@ -178,9 +178,9 @@ static __net_init int sysctl_core_net_init(struct net *net)
178 } 178 }
179 } 179 }
180 180
181 net->sysctl_core_hdr = register_net_sysctl_table(net, 181 net->core.sysctl_hdr = register_net_sysctl_table(net,
182 net_core_path, tbl); 182 net_core_path, tbl);
183 if (net->sysctl_core_hdr == NULL) 183 if (net->core.sysctl_hdr == NULL)
184 goto err_reg; 184 goto err_reg;
185 185
186 return 0; 186 return 0;
@@ -196,8 +196,8 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
196{ 196{
197 struct ctl_table *tbl; 197 struct ctl_table *tbl;
198 198
199 tbl = net->sysctl_core_hdr->ctl_table_arg; 199 tbl = net->core.sysctl_hdr->ctl_table_arg;
200 unregister_net_sysctl_table(net->sysctl_core_hdr); 200 unregister_net_sysctl_table(net->core.sysctl_hdr);
201 BUG_ON(tbl == net_core_table); 201 BUG_ON(tbl == net_core_table);
202 kfree(tbl); 202 kfree(tbl);
203} 203}
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index ba2ef94a2302..f44d492d3b74 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -23,9 +23,9 @@
23 * DCCP - specific warning and debugging macros. 23 * DCCP - specific warning and debugging macros.
24 */ 24 */
25#define DCCP_WARN(fmt, a...) LIMIT_NETDEBUG(KERN_WARNING "%s: " fmt, \ 25#define DCCP_WARN(fmt, a...) LIMIT_NETDEBUG(KERN_WARNING "%s: " fmt, \
26 __FUNCTION__, ##a) 26 __func__, ##a)
27#define DCCP_CRIT(fmt, a...) printk(KERN_CRIT fmt " at %s:%d/%s()\n", ##a, \ 27#define DCCP_CRIT(fmt, a...) printk(KERN_CRIT fmt " at %s:%d/%s()\n", ##a, \
28 __FILE__, __LINE__, __FUNCTION__) 28 __FILE__, __LINE__, __func__)
29#define DCCP_BUG(a...) do { DCCP_CRIT("BUG: " a); dump_stack(); } while(0) 29#define DCCP_BUG(a...) do { DCCP_CRIT("BUG: " a); dump_stack(); } while(0)
30#define DCCP_BUG_ON(cond) do { if (unlikely((cond) != 0)) \ 30#define DCCP_BUG_ON(cond) do { if (unlikely((cond) != 0)) \
31 DCCP_BUG("\"%s\" holds (exception!)", \ 31 DCCP_BUG("\"%s\" holds (exception!)", \
@@ -36,7 +36,7 @@
36 printk(fmt, ##args); \ 36 printk(fmt, ##args); \
37 } while(0) 37 } while(0)
38#define DCCP_PR_DEBUG(enable, fmt, a...) DCCP_PRINTK(enable, KERN_DEBUG \ 38#define DCCP_PR_DEBUG(enable, fmt, a...) DCCP_PRINTK(enable, KERN_DEBUG \
39 "%s: " fmt, __FUNCTION__, ##a) 39 "%s: " fmt, __func__, ##a)
40 40
41#ifdef CONFIG_IP_DCCP_DEBUG 41#ifdef CONFIG_IP_DCCP_DEBUG
42extern int dccp_debug; 42extern int dccp_debug;
@@ -296,7 +296,7 @@ extern unsigned int dccp_poll(struct file *file, struct socket *sock,
296extern int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, 296extern int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
297 int addr_len); 297 int addr_len);
298 298
299extern struct sk_buff *dccp_ctl_make_reset(struct socket *ctl, 299extern struct sk_buff *dccp_ctl_make_reset(struct sock *sk,
300 struct sk_buff *skb); 300 struct sk_buff *skb);
301extern int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code); 301extern int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
302extern void dccp_send_close(struct sock *sk, const int active); 302extern void dccp_send_close(struct sock *sk, const int active);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index b33704415555..b348dd70c685 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -32,11 +32,10 @@
32#include "feat.h" 32#include "feat.h"
33 33
34/* 34/*
35 * This is the global socket data structure used for responding to 35 * The per-net dccp.v4_ctl_sk socket is used for responding to
36 * the Out-of-the-blue (OOTB) packets. A control sock will be created 36 * the Out-of-the-blue (OOTB) packets. A control sock will be created
37 * for this socket at the initialization time. 37 * for this socket at the initialization time.
38 */ 38 */
39static struct socket *dccp_v4_ctl_socket;
40 39
41int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 40int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
42{ 41{
@@ -212,8 +211,9 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
212 return; 211 return;
213 } 212 }
214 213
215 sk = inet_lookup(&init_net, &dccp_hashinfo, iph->daddr, dh->dccph_dport, 214 sk = inet_lookup(dev_net(skb->dev), &dccp_hashinfo,
216 iph->saddr, dh->dccph_sport, inet_iif(skb)); 215 iph->daddr, dh->dccph_dport,
216 iph->saddr, dh->dccph_sport, inet_iif(skb));
217 if (sk == NULL) { 217 if (sk == NULL) {
218 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 218 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
219 return; 219 return;
@@ -430,7 +430,7 @@ static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
430 if (req != NULL) 430 if (req != NULL)
431 return dccp_check_req(sk, skb, req, prev); 431 return dccp_check_req(sk, skb, req, prev);
432 432
433 nsk = inet_lookup_established(&init_net, &dccp_hashinfo, 433 nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo,
434 iph->saddr, dh->dccph_sport, 434 iph->saddr, dh->dccph_sport,
435 iph->daddr, dh->dccph_dport, 435 iph->daddr, dh->dccph_dport,
436 inet_iif(skb)); 436 inet_iif(skb));
@@ -446,11 +446,11 @@ static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
446 return sk; 446 return sk;
447} 447}
448 448
449static struct dst_entry* dccp_v4_route_skb(struct sock *sk, 449static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
450 struct sk_buff *skb) 450 struct sk_buff *skb)
451{ 451{
452 struct rtable *rt; 452 struct rtable *rt;
453 struct flowi fl = { .oif = ((struct rtable *)skb->dst)->rt_iif, 453 struct flowi fl = { .oif = skb->rtable->rt_iif,
454 .nl_u = { .ip4_u = 454 .nl_u = { .ip4_u =
455 { .daddr = ip_hdr(skb)->saddr, 455 { .daddr = ip_hdr(skb)->saddr,
456 .saddr = ip_hdr(skb)->daddr, 456 .saddr = ip_hdr(skb)->daddr,
@@ -463,7 +463,7 @@ static struct dst_entry* dccp_v4_route_skb(struct sock *sk,
463 }; 463 };
464 464
465 security_skb_classify_flow(skb, &fl); 465 security_skb_classify_flow(skb, &fl);
466 if (ip_route_output_flow(&init_net, &rt, &fl, sk, 0)) { 466 if (ip_route_output_flow(net, &rt, &fl, sk, 0)) {
467 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); 467 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
468 return NULL; 468 return NULL;
469 } 469 }
@@ -471,15 +471,14 @@ static struct dst_entry* dccp_v4_route_skb(struct sock *sk,
471 return &rt->u.dst; 471 return &rt->u.dst;
472} 472}
473 473
474static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, 474static int dccp_v4_send_response(struct sock *sk, struct request_sock *req)
475 struct dst_entry *dst)
476{ 475{
477 int err = -1; 476 int err = -1;
478 struct sk_buff *skb; 477 struct sk_buff *skb;
478 struct dst_entry *dst;
479 479
480 /* First, grab a route. */ 480 dst = inet_csk_route_req(sk, req);
481 481 if (dst == NULL)
482 if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
483 goto out; 482 goto out;
484 483
485 skb = dccp_make_response(sk, dst, req); 484 skb = dccp_make_response(sk, dst, req);
@@ -506,19 +505,21 @@ static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
506 const struct iphdr *rxiph; 505 const struct iphdr *rxiph;
507 struct sk_buff *skb; 506 struct sk_buff *skb;
508 struct dst_entry *dst; 507 struct dst_entry *dst;
508 struct net *net = dev_net(rxskb->dst->dev);
509 struct sock *ctl_sk = net->dccp.v4_ctl_sk;
509 510
510 /* Never send a reset in response to a reset. */ 511 /* Never send a reset in response to a reset. */
511 if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET) 512 if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
512 return; 513 return;
513 514
514 if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL) 515 if (rxskb->rtable->rt_type != RTN_LOCAL)
515 return; 516 return;
516 517
517 dst = dccp_v4_route_skb(dccp_v4_ctl_socket->sk, rxskb); 518 dst = dccp_v4_route_skb(net, ctl_sk, rxskb);
518 if (dst == NULL) 519 if (dst == NULL)
519 return; 520 return;
520 521
521 skb = dccp_ctl_make_reset(dccp_v4_ctl_socket, rxskb); 522 skb = dccp_ctl_make_reset(ctl_sk, rxskb);
522 if (skb == NULL) 523 if (skb == NULL)
523 goto out; 524 goto out;
524 525
@@ -527,10 +528,10 @@ static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
527 rxiph->daddr); 528 rxiph->daddr);
528 skb->dst = dst_clone(dst); 529 skb->dst = dst_clone(dst);
529 530
530 bh_lock_sock(dccp_v4_ctl_socket->sk); 531 bh_lock_sock(ctl_sk);
531 err = ip_build_and_send_pkt(skb, dccp_v4_ctl_socket->sk, 532 err = ip_build_and_send_pkt(skb, ctl_sk,
532 rxiph->daddr, rxiph->saddr, NULL); 533 rxiph->daddr, rxiph->saddr, NULL);
533 bh_unlock_sock(dccp_v4_ctl_socket->sk); 534 bh_unlock_sock(ctl_sk);
534 535
535 if (net_xmit_eval(err) == 0) { 536 if (net_xmit_eval(err) == 0) {
536 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); 537 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
@@ -563,8 +564,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
563 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); 564 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
564 565
565 /* Never answer to DCCP_PKT_REQUESTs send to broadcast or multicast */ 566 /* Never answer to DCCP_PKT_REQUESTs send to broadcast or multicast */
566 if (((struct rtable *)skb->dst)->rt_flags & 567 if (skb->rtable->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
567 (RTCF_BROADCAST | RTCF_MULTICAST))
568 return 0; /* discard, don't send a reset here */ 568 return 0; /* discard, don't send a reset here */
569 569
570 if (dccp_bad_service_code(sk, service)) { 570 if (dccp_bad_service_code(sk, service)) {
@@ -619,7 +619,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
619 dreq->dreq_iss = dccp_v4_init_sequence(skb); 619 dreq->dreq_iss = dccp_v4_init_sequence(skb);
620 dreq->dreq_service = service; 620 dreq->dreq_service = service;
621 621
622 if (dccp_v4_send_response(sk, req, NULL)) 622 if (dccp_v4_send_response(sk, req))
623 goto drop_and_free; 623 goto drop_and_free;
624 624
625 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); 625 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
@@ -810,7 +810,7 @@ static int dccp_v4_rcv(struct sk_buff *skb)
810 810
811 /* Step 2: 811 /* Step 2:
812 * Look up flow ID in table and get corresponding socket */ 812 * Look up flow ID in table and get corresponding socket */
813 sk = __inet_lookup(&init_net, &dccp_hashinfo, 813 sk = __inet_lookup(dev_net(skb->dst->dev), &dccp_hashinfo,
814 iph->saddr, dh->dccph_sport, 814 iph->saddr, dh->dccph_sport,
815 iph->daddr, dh->dccph_dport, inet_iif(skb)); 815 iph->daddr, dh->dccph_dport, inet_iif(skb));
816 /* 816 /*
@@ -916,8 +916,6 @@ static struct timewait_sock_ops dccp_timewait_sock_ops = {
916 .twsk_obj_size = sizeof(struct inet_timewait_sock), 916 .twsk_obj_size = sizeof(struct inet_timewait_sock),
917}; 917};
918 918
919DEFINE_PROTO_INUSE(dccp_v4)
920
921static struct proto dccp_v4_prot = { 919static struct proto dccp_v4_prot = {
922 .name = "DCCP", 920 .name = "DCCP",
923 .owner = THIS_MODULE, 921 .owner = THIS_MODULE,
@@ -942,18 +940,18 @@ static struct proto dccp_v4_prot = {
942 .obj_size = sizeof(struct dccp_sock), 940 .obj_size = sizeof(struct dccp_sock),
943 .rsk_prot = &dccp_request_sock_ops, 941 .rsk_prot = &dccp_request_sock_ops,
944 .twsk_prot = &dccp_timewait_sock_ops, 942 .twsk_prot = &dccp_timewait_sock_ops,
945 .hashinfo = &dccp_hashinfo, 943 .h.hashinfo = &dccp_hashinfo,
946#ifdef CONFIG_COMPAT 944#ifdef CONFIG_COMPAT
947 .compat_setsockopt = compat_dccp_setsockopt, 945 .compat_setsockopt = compat_dccp_setsockopt,
948 .compat_getsockopt = compat_dccp_getsockopt, 946 .compat_getsockopt = compat_dccp_getsockopt,
949#endif 947#endif
950 REF_PROTO_INUSE(dccp_v4)
951}; 948};
952 949
953static struct net_protocol dccp_v4_protocol = { 950static struct net_protocol dccp_v4_protocol = {
954 .handler = dccp_v4_rcv, 951 .handler = dccp_v4_rcv,
955 .err_handler = dccp_v4_err, 952 .err_handler = dccp_v4_err,
956 .no_policy = 1, 953 .no_policy = 1,
954 .netns_ok = 1,
957}; 955};
958 956
959static const struct proto_ops inet_dccp_ops = { 957static const struct proto_ops inet_dccp_ops = {
@@ -993,6 +991,25 @@ static struct inet_protosw dccp_v4_protosw = {
993 .flags = INET_PROTOSW_ICSK, 991 .flags = INET_PROTOSW_ICSK,
994}; 992};
995 993
994static int dccp_v4_init_net(struct net *net)
995{
996 int err;
997
998 err = inet_ctl_sock_create(&net->dccp.v4_ctl_sk, PF_INET,
999 SOCK_DCCP, IPPROTO_DCCP, net);
1000 return err;
1001}
1002
1003static void dccp_v4_exit_net(struct net *net)
1004{
1005 inet_ctl_sock_destroy(net->dccp.v4_ctl_sk);
1006}
1007
1008static struct pernet_operations dccp_v4_ops = {
1009 .init = dccp_v4_init_net,
1010 .exit = dccp_v4_exit_net,
1011};
1012
996static int __init dccp_v4_init(void) 1013static int __init dccp_v4_init(void)
997{ 1014{
998 int err = proto_register(&dccp_v4_prot, 1); 1015 int err = proto_register(&dccp_v4_prot, 1);
@@ -1006,13 +1023,12 @@ static int __init dccp_v4_init(void)
1006 1023
1007 inet_register_protosw(&dccp_v4_protosw); 1024 inet_register_protosw(&dccp_v4_protosw);
1008 1025
1009 err = inet_csk_ctl_sock_create(&dccp_v4_ctl_socket, PF_INET, 1026 err = register_pernet_subsys(&dccp_v4_ops);
1010 SOCK_DCCP, IPPROTO_DCCP);
1011 if (err) 1027 if (err)
1012 goto out_unregister_protosw; 1028 goto out_destroy_ctl_sock;
1013out: 1029out:
1014 return err; 1030 return err;
1015out_unregister_protosw: 1031out_destroy_ctl_sock:
1016 inet_unregister_protosw(&dccp_v4_protosw); 1032 inet_unregister_protosw(&dccp_v4_protosw);
1017 inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP); 1033 inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
1018out_proto_unregister: 1034out_proto_unregister:
@@ -1022,6 +1038,7 @@ out_proto_unregister:
1022 1038
1023static void __exit dccp_v4_exit(void) 1039static void __exit dccp_v4_exit(void)
1024{ 1040{
1041 unregister_pernet_subsys(&dccp_v4_ops);
1025 inet_unregister_protosw(&dccp_v4_protosw); 1042 inet_unregister_protosw(&dccp_v4_protosw);
1026 inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP); 1043 inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
1027 proto_unregister(&dccp_v4_prot); 1044 proto_unregister(&dccp_v4_prot);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 490333d47c7b..9b1129bb7ece 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -33,8 +33,7 @@
33#include "ipv6.h" 33#include "ipv6.h"
34#include "feat.h" 34#include "feat.h"
35 35
36/* Socket used for sending RSTs and ACKs */ 36/* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */
37static struct socket *dccp_v6_ctl_socket;
38 37
39static struct inet_connection_sock_af_ops dccp_ipv6_mapped; 38static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
40static struct inet_connection_sock_af_ops dccp_ipv6_af_ops; 39static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
@@ -95,7 +94,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
95 int err; 94 int err;
96 __u64 seq; 95 __u64 seq;
97 96
98 sk = inet6_lookup(&init_net, &dccp_hashinfo, &hdr->daddr, dh->dccph_dport, 97 sk = inet6_lookup(dev_net(skb->dev), &dccp_hashinfo,
98 &hdr->daddr, dh->dccph_dport,
99 &hdr->saddr, dh->dccph_sport, inet6_iif(skb)); 99 &hdr->saddr, dh->dccph_sport, inet6_iif(skb));
100 100
101 if (sk == NULL) { 101 if (sk == NULL) {
@@ -224,8 +224,7 @@ out:
224} 224}
225 225
226 226
227static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, 227static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
228 struct dst_entry *dst)
229{ 228{
230 struct inet6_request_sock *ireq6 = inet6_rsk(req); 229 struct inet6_request_sock *ireq6 = inet6_rsk(req);
231 struct ipv6_pinfo *np = inet6_sk(sk); 230 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -234,6 +233,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
234 struct in6_addr *final_p = NULL, final; 233 struct in6_addr *final_p = NULL, final;
235 struct flowi fl; 234 struct flowi fl;
236 int err = -1; 235 int err = -1;
236 struct dst_entry *dst;
237 237
238 memset(&fl, 0, sizeof(fl)); 238 memset(&fl, 0, sizeof(fl));
239 fl.proto = IPPROTO_DCCP; 239 fl.proto = IPPROTO_DCCP;
@@ -245,28 +245,26 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
245 fl.fl_ip_sport = inet_sk(sk)->sport; 245 fl.fl_ip_sport = inet_sk(sk)->sport;
246 security_req_classify_flow(req, &fl); 246 security_req_classify_flow(req, &fl);
247 247
248 if (dst == NULL) { 248 opt = np->opt;
249 opt = np->opt;
250 249
251 if (opt != NULL && opt->srcrt != NULL) { 250 if (opt != NULL && opt->srcrt != NULL) {
252 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt; 251 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
253 252
254 ipv6_addr_copy(&final, &fl.fl6_dst); 253 ipv6_addr_copy(&final, &fl.fl6_dst);
255 ipv6_addr_copy(&fl.fl6_dst, rt0->addr); 254 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
256 final_p = &final; 255 final_p = &final;
257 } 256 }
258 257
259 err = ip6_dst_lookup(sk, &dst, &fl); 258 err = ip6_dst_lookup(sk, &dst, &fl);
260 if (err) 259 if (err)
261 goto done; 260 goto done;
262 261
263 if (final_p) 262 if (final_p)
264 ipv6_addr_copy(&fl.fl6_dst, final_p); 263 ipv6_addr_copy(&fl.fl6_dst, final_p);
265 264
266 err = xfrm_lookup(&dst, &fl, sk, 0); 265 err = xfrm_lookup(&dst, &fl, sk, 0);
267 if (err < 0) 266 if (err < 0)
268 goto done; 267 goto done;
269 }
270 268
271 skb = dccp_make_response(sk, dst, req); 269 skb = dccp_make_response(sk, dst, req);
272 if (skb != NULL) { 270 if (skb != NULL) {
@@ -298,6 +296,8 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
298 struct ipv6hdr *rxip6h; 296 struct ipv6hdr *rxip6h;
299 struct sk_buff *skb; 297 struct sk_buff *skb;
300 struct flowi fl; 298 struct flowi fl;
299 struct net *net = dev_net(rxskb->dst->dev);
300 struct sock *ctl_sk = net->dccp.v6_ctl_sk;
301 301
302 if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET) 302 if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
303 return; 303 return;
@@ -305,7 +305,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
305 if (!ipv6_unicast_destination(rxskb)) 305 if (!ipv6_unicast_destination(rxskb))
306 return; 306 return;
307 307
308 skb = dccp_ctl_make_reset(dccp_v6_ctl_socket, rxskb); 308 skb = dccp_ctl_make_reset(ctl_sk, rxskb);
309 if (skb == NULL) 309 if (skb == NULL)
310 return; 310 return;
311 311
@@ -324,9 +324,9 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
324 security_skb_classify_flow(rxskb, &fl); 324 security_skb_classify_flow(rxskb, &fl);
325 325
326 /* sk = NULL, but it is safe for now. RST socket required. */ 326 /* sk = NULL, but it is safe for now. RST socket required. */
327 if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) { 327 if (!ip6_dst_lookup(ctl_sk, &skb->dst, &fl)) {
328 if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) { 328 if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
329 ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0); 329 ip6_xmit(ctl_sk, skb, &fl, NULL, 0);
330 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); 330 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
331 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); 331 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
332 return; 332 return;
@@ -360,7 +360,7 @@ static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
360 if (req != NULL) 360 if (req != NULL)
361 return dccp_check_req(sk, skb, req, prev); 361 return dccp_check_req(sk, skb, req, prev);
362 362
363 nsk = __inet6_lookup_established(&init_net, &dccp_hashinfo, 363 nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
364 &iph->saddr, dh->dccph_sport, 364 &iph->saddr, dh->dccph_sport,
365 &iph->daddr, ntohs(dh->dccph_dport), 365 &iph->daddr, ntohs(dh->dccph_dport),
366 inet6_iif(skb)); 366 inet6_iif(skb));
@@ -448,7 +448,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
448 dreq->dreq_iss = dccp_v6_init_sequence(skb); 448 dreq->dreq_iss = dccp_v6_init_sequence(skb);
449 dreq->dreq_service = service; 449 dreq->dreq_service = service;
450 450
451 if (dccp_v6_send_response(sk, req, NULL)) 451 if (dccp_v6_send_response(sk, req))
452 goto drop_and_free; 452 goto drop_and_free;
453 453
454 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); 454 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
@@ -625,7 +625,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
625 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6; 625 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
626 626
627 __inet6_hash(newsk); 627 __inet6_hash(newsk);
628 inet_inherit_port(sk, newsk); 628 __inet_inherit_port(sk, newsk);
629 629
630 return newsk; 630 return newsk;
631 631
@@ -791,8 +791,8 @@ static int dccp_v6_rcv(struct sk_buff *skb)
791 791
792 /* Step 2: 792 /* Step 2:
793 * Look up flow ID in table and get corresponding socket */ 793 * Look up flow ID in table and get corresponding socket */
794 sk = __inet6_lookup(&init_net, &dccp_hashinfo, &ipv6_hdr(skb)->saddr, 794 sk = __inet6_lookup(dev_net(skb->dst->dev), &dccp_hashinfo,
795 dh->dccph_sport, 795 &ipv6_hdr(skb)->saddr, dh->dccph_sport,
796 &ipv6_hdr(skb)->daddr, ntohs(dh->dccph_dport), 796 &ipv6_hdr(skb)->daddr, ntohs(dh->dccph_dport),
797 inet6_iif(skb)); 797 inet6_iif(skb));
798 /* 798 /*
@@ -1102,8 +1102,6 @@ static struct timewait_sock_ops dccp6_timewait_sock_ops = {
1102 .twsk_obj_size = sizeof(struct dccp6_timewait_sock), 1102 .twsk_obj_size = sizeof(struct dccp6_timewait_sock),
1103}; 1103};
1104 1104
1105DEFINE_PROTO_INUSE(dccp_v6)
1106
1107static struct proto dccp_v6_prot = { 1105static struct proto dccp_v6_prot = {
1108 .name = "DCCPv6", 1106 .name = "DCCPv6",
1109 .owner = THIS_MODULE, 1107 .owner = THIS_MODULE,
@@ -1128,12 +1126,11 @@ static struct proto dccp_v6_prot = {
1128 .obj_size = sizeof(struct dccp6_sock), 1126 .obj_size = sizeof(struct dccp6_sock),
1129 .rsk_prot = &dccp6_request_sock_ops, 1127 .rsk_prot = &dccp6_request_sock_ops,
1130 .twsk_prot = &dccp6_timewait_sock_ops, 1128 .twsk_prot = &dccp6_timewait_sock_ops,
1131 .hashinfo = &dccp_hashinfo, 1129 .h.hashinfo = &dccp_hashinfo,
1132#ifdef CONFIG_COMPAT 1130#ifdef CONFIG_COMPAT
1133 .compat_setsockopt = compat_dccp_setsockopt, 1131 .compat_setsockopt = compat_dccp_setsockopt,
1134 .compat_getsockopt = compat_dccp_getsockopt, 1132 .compat_getsockopt = compat_dccp_getsockopt,
1135#endif 1133#endif
1136 REF_PROTO_INUSE(dccp_v6)
1137}; 1134};
1138 1135
1139static struct inet6_protocol dccp_v6_protocol = { 1136static struct inet6_protocol dccp_v6_protocol = {
@@ -1176,6 +1173,25 @@ static struct inet_protosw dccp_v6_protosw = {
1176 .flags = INET_PROTOSW_ICSK, 1173 .flags = INET_PROTOSW_ICSK,
1177}; 1174};
1178 1175
1176static int dccp_v6_init_net(struct net *net)
1177{
1178 int err;
1179
1180 err = inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
1181 SOCK_DCCP, IPPROTO_DCCP, net);
1182 return err;
1183}
1184
1185static void dccp_v6_exit_net(struct net *net)
1186{
1187 inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
1188}
1189
1190static struct pernet_operations dccp_v6_ops = {
1191 .init = dccp_v6_init_net,
1192 .exit = dccp_v6_exit_net,
1193};
1194
1179static int __init dccp_v6_init(void) 1195static int __init dccp_v6_init(void)
1180{ 1196{
1181 int err = proto_register(&dccp_v6_prot, 1); 1197 int err = proto_register(&dccp_v6_prot, 1);
@@ -1189,13 +1205,13 @@ static int __init dccp_v6_init(void)
1189 1205
1190 inet6_register_protosw(&dccp_v6_protosw); 1206 inet6_register_protosw(&dccp_v6_protosw);
1191 1207
1192 err = inet_csk_ctl_sock_create(&dccp_v6_ctl_socket, PF_INET6, 1208 err = register_pernet_subsys(&dccp_v6_ops);
1193 SOCK_DCCP, IPPROTO_DCCP);
1194 if (err != 0) 1209 if (err != 0)
1195 goto out_unregister_protosw; 1210 goto out_destroy_ctl_sock;
1196out: 1211out:
1197 return err; 1212 return err;
1198out_unregister_protosw: 1213
1214out_destroy_ctl_sock:
1199 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP); 1215 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1200 inet6_unregister_protosw(&dccp_v6_protosw); 1216 inet6_unregister_protosw(&dccp_v6_protosw);
1201out_unregister_proto: 1217out_unregister_proto:
@@ -1205,6 +1221,7 @@ out_unregister_proto:
1205 1221
1206static void __exit dccp_v6_exit(void) 1222static void __exit dccp_v6_exit(void)
1207{ 1223{
1224 unregister_pernet_subsys(&dccp_v6_ops);
1208 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP); 1225 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1209 inet6_unregister_protosw(&dccp_v6_protosw); 1226 inet6_unregister_protosw(&dccp_v6_protosw);
1210 proto_unregister(&dccp_v6_prot); 1227 proto_unregister(&dccp_v6_prot);
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 027d1814e1ab..33ad48321b08 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -216,7 +216,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
216 * counter (backoff, monitored by dccp_response_timer). 216 * counter (backoff, monitored by dccp_response_timer).
217 */ 217 */
218 req->retrans++; 218 req->retrans++;
219 req->rsk_ops->rtx_syn_ack(sk, req, NULL); 219 req->rsk_ops->rtx_syn_ack(sk, req);
220 } 220 }
221 /* Network Duplicate, discard packet */ 221 /* Network Duplicate, discard packet */
222 return NULL; 222 return NULL;
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 3d7d628d870d..1f8a9b64c083 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -347,7 +347,7 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
347EXPORT_SYMBOL_GPL(dccp_make_response); 347EXPORT_SYMBOL_GPL(dccp_make_response);
348 348
349/* answer offending packet in @rcv_skb with Reset from control socket @ctl */ 349/* answer offending packet in @rcv_skb with Reset from control socket @ctl */
350struct sk_buff *dccp_ctl_make_reset(struct socket *ctl, struct sk_buff *rcv_skb) 350struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb)
351{ 351{
352 struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh; 352 struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh;
353 struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb); 353 struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb);
@@ -357,11 +357,11 @@ struct sk_buff *dccp_ctl_make_reset(struct socket *ctl, struct sk_buff *rcv_skb)
357 struct dccp_hdr_reset *dhr; 357 struct dccp_hdr_reset *dhr;
358 struct sk_buff *skb; 358 struct sk_buff *skb;
359 359
360 skb = alloc_skb(ctl->sk->sk_prot->max_header, GFP_ATOMIC); 360 skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
361 if (skb == NULL) 361 if (skb == NULL)
362 return NULL; 362 return NULL;
363 363
364 skb_reserve(skb, ctl->sk->sk_prot->max_header); 364 skb_reserve(skb, sk->sk_prot->max_header);
365 365
366 /* Swap the send and the receive. */ 366 /* Swap the send and the receive. */
367 dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len); 367 dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index c91d3c1fd30d..a4c1b36f4bc7 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1010,33 +1010,14 @@ void dccp_shutdown(struct sock *sk, int how)
1010 1010
1011EXPORT_SYMBOL_GPL(dccp_shutdown); 1011EXPORT_SYMBOL_GPL(dccp_shutdown);
1012 1012
1013static int __init dccp_mib_init(void) 1013static inline int dccp_mib_init(void)
1014{ 1014{
1015 int rc = -ENOMEM; 1015 return snmp_mib_init((void**)dccp_statistics, sizeof(struct dccp_mib));
1016
1017 dccp_statistics[0] = alloc_percpu(struct dccp_mib);
1018 if (dccp_statistics[0] == NULL)
1019 goto out;
1020
1021 dccp_statistics[1] = alloc_percpu(struct dccp_mib);
1022 if (dccp_statistics[1] == NULL)
1023 goto out_free_one;
1024
1025 rc = 0;
1026out:
1027 return rc;
1028out_free_one:
1029 free_percpu(dccp_statistics[0]);
1030 dccp_statistics[0] = NULL;
1031 goto out;
1032
1033} 1016}
1034 1017
1035static void dccp_mib_exit(void) 1018static inline void dccp_mib_exit(void)
1036{ 1019{
1037 free_percpu(dccp_statistics[0]); 1020 snmp_mib_free((void**)dccp_statistics);
1038 free_percpu(dccp_statistics[1]);
1039 dccp_statistics[0] = dccp_statistics[1] = NULL;
1040} 1021}
1041 1022
1042static int thash_entries; 1023static int thash_entries;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index acd48ee522d6..fc2efe899e91 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1094,7 +1094,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
1094 1094
1095 cb = DN_SKB_CB(skb); 1095 cb = DN_SKB_CB(skb);
1096 sk->sk_ack_backlog--; 1096 sk->sk_ack_backlog--;
1097 newsk = dn_alloc_sock(sk->sk_net, newsock, sk->sk_allocation); 1097 newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation);
1098 if (newsk == NULL) { 1098 if (newsk == NULL) {
1099 release_sock(sk); 1099 release_sock(sk);
1100 kfree_skb(skb); 1100 kfree_skb(skb);
@@ -2089,7 +2089,7 @@ static int dn_device_event(struct notifier_block *this, unsigned long event,
2089{ 2089{
2090 struct net_device *dev = (struct net_device *)ptr; 2090 struct net_device *dev = (struct net_device *)ptr;
2091 2091
2092 if (dev->nd_net != &init_net) 2092 if (dev_net(dev) != &init_net)
2093 return NOTIFY_DONE; 2093 return NOTIFY_DONE;
2094 2094
2095 switch(event) { 2095 switch(event) {
@@ -2320,25 +2320,8 @@ static const struct seq_operations dn_socket_seq_ops = {
2320 2320
2321static int dn_socket_seq_open(struct inode *inode, struct file *file) 2321static int dn_socket_seq_open(struct inode *inode, struct file *file)
2322{ 2322{
2323 struct seq_file *seq; 2323 return seq_open_private(file, &dn_socket_seq_ops,
2324 int rc = -ENOMEM; 2324 sizeof(struct dn_iter_state));
2325 struct dn_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
2326
2327 if (!s)
2328 goto out;
2329
2330 rc = seq_open(file, &dn_socket_seq_ops);
2331 if (rc)
2332 goto out_kfree;
2333
2334 seq = file->private_data;
2335 seq->private = s;
2336 memset(s, 0, sizeof(*s));
2337out:
2338 return rc;
2339out_kfree:
2340 kfree(s);
2341 goto out;
2342} 2325}
2343 2326
2344static const struct file_operations dn_socket_seq_fops = { 2327static const struct file_operations dn_socket_seq_fops = {
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 1bbfce5f7a2d..2f0ac3c3eb71 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -625,7 +625,7 @@ static const struct nla_policy dn_ifa_policy[IFA_MAX+1] = {
625 625
626static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 626static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
627{ 627{
628 struct net *net = skb->sk->sk_net; 628 struct net *net = sock_net(skb->sk);
629 struct nlattr *tb[IFA_MAX+1]; 629 struct nlattr *tb[IFA_MAX+1];
630 struct dn_dev *dn_db; 630 struct dn_dev *dn_db;
631 struct ifaddrmsg *ifm; 631 struct ifaddrmsg *ifm;
@@ -663,7 +663,7 @@ errout:
663 663
664static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 664static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
665{ 665{
666 struct net *net = skb->sk->sk_net; 666 struct net *net = sock_net(skb->sk);
667 struct nlattr *tb[IFA_MAX+1]; 667 struct nlattr *tb[IFA_MAX+1];
668 struct net_device *dev; 668 struct net_device *dev;
669 struct dn_dev *dn_db; 669 struct dn_dev *dn_db;
@@ -779,7 +779,7 @@ errout:
779 779
780static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) 780static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
781{ 781{
782 struct net *net = skb->sk->sk_net; 782 struct net *net = sock_net(skb->sk);
783 int idx, dn_idx = 0, skip_ndevs, skip_naddr; 783 int idx, dn_idx = 0, skip_ndevs, skip_naddr;
784 struct net_device *dev; 784 struct net_device *dev;
785 struct dn_dev *dn_db; 785 struct dn_dev *dn_db;
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 4aa9a423e606..27ea2e9b080a 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -504,7 +504,7 @@ static int dn_fib_check_attr(struct rtmsg *r, struct rtattr **rta)
504 504
505static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 505static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
506{ 506{
507 struct net *net = skb->sk->sk_net; 507 struct net *net = sock_net(skb->sk);
508 struct dn_fib_table *tb; 508 struct dn_fib_table *tb;
509 struct rtattr **rta = arg; 509 struct rtattr **rta = arg;
510 struct rtmsg *r = NLMSG_DATA(nlh); 510 struct rtmsg *r = NLMSG_DATA(nlh);
@@ -524,7 +524,7 @@ static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *
524 524
525static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 525static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
526{ 526{
527 struct net *net = skb->sk->sk_net; 527 struct net *net = sock_net(skb->sk);
528 struct dn_fib_table *tb; 528 struct dn_fib_table *tb;
529 struct rtattr **rta = arg; 529 struct rtattr **rta = arg;
530 struct rtmsg *r = NLMSG_DATA(nlh); 530 struct rtmsg *r = NLMSG_DATA(nlh);
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 9dc0abb50eaf..2f665a516476 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -580,7 +580,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
580 struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr; 580 struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr;
581 unsigned char padlen = 0; 581 unsigned char padlen = 0;
582 582
583 if (dev->nd_net != &init_net) 583 if (dev_net(dev) != &init_net)
584 goto dump_it; 584 goto dump_it;
585 585
586 if (dn == NULL) 586 if (dn == NULL)
@@ -1512,7 +1512,7 @@ rtattr_failure:
1512 */ 1512 */
1513static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg) 1513static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
1514{ 1514{
1515 struct net *net = in_skb->sk->sk_net; 1515 struct net *net = sock_net(in_skb->sk);
1516 struct rtattr **rta = arg; 1516 struct rtattr **rta = arg;
1517 struct rtmsg *rtm = NLMSG_DATA(nlh); 1517 struct rtmsg *rtm = NLMSG_DATA(nlh);
1518 struct dn_route *rt = NULL; 1518 struct dn_route *rt = NULL;
@@ -1601,7 +1601,7 @@ out_free:
1601 */ 1601 */
1602int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb) 1602int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
1603{ 1603{
1604 struct net *net = skb->sk->sk_net; 1604 struct net *net = sock_net(skb->sk);
1605 struct dn_route *rt; 1605 struct dn_route *rt;
1606 int h, s_h; 1606 int h, s_h;
1607 int idx, s_idx; 1607 int idx, s_idx;
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index e09d915dbd77..3a2830ac89c2 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -463,7 +463,7 @@ static int dn_fib_table_dump(struct dn_fib_table *tb, struct sk_buff *skb,
463 463
464int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb) 464int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
465{ 465{
466 struct net *net = skb->sk->sk_net; 466 struct net *net = sock_net(skb->sk);
467 unsigned int h, s_h; 467 unsigned int h, s_h;
468 unsigned int e = 0, s_e; 468 unsigned int e = 0, s_e;
469 struct dn_fib_table *tb; 469 struct dn_fib_table *tb;
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index bc0f6252613f..68d154480043 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -1064,7 +1064,7 @@ static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
1064 struct sock *sk; 1064 struct sock *sk;
1065 struct ec_device *edev = dev->ec_ptr; 1065 struct ec_device *edev = dev->ec_ptr;
1066 1066
1067 if (dev->nd_net != &init_net) 1067 if (dev_net(dev) != &init_net)
1068 goto drop; 1068 goto drop;
1069 1069
1070 if (skb->pkt_type == PACKET_OTHERHOST) 1070 if (skb->pkt_type == PACKET_OTHERHOST)
@@ -1121,7 +1121,7 @@ static int econet_notifier(struct notifier_block *this, unsigned long msg, void
1121 struct net_device *dev = (struct net_device *)data; 1121 struct net_device *dev = (struct net_device *)data;
1122 struct ec_device *edev; 1122 struct ec_device *edev;
1123 1123
1124 if (dev->nd_net != &init_net) 1124 if (dev_net(dev) != &init_net)
1125 return NOTIFY_DONE; 1125 return NOTIFY_DONE;
1126 1126
1127 switch (msg) { 1127 switch (msg) {
diff --git a/net/ieee80211/Kconfig b/net/ieee80211/Kconfig
index bd501046c9c0..94ed7d3cd9da 100644
--- a/net/ieee80211/Kconfig
+++ b/net/ieee80211/Kconfig
@@ -71,4 +71,3 @@ config IEEE80211_CRYPT_TKIP
71 This can be compiled as a module and it will be called 71 This can be compiled as a module and it will be called
72 "ieee80211_crypt_tkip". 72 "ieee80211_crypt_tkip".
73 73
74source "net/ieee80211/softmac/Kconfig"
diff --git a/net/ieee80211/Makefile b/net/ieee80211/Makefile
index 796a7c76ee48..f988417121da 100644
--- a/net/ieee80211/Makefile
+++ b/net/ieee80211/Makefile
@@ -10,4 +10,3 @@ ieee80211-objs := \
10 ieee80211_wx.o \ 10 ieee80211_wx.o \
11 ieee80211_geo.o 11 ieee80211_geo.o
12 12
13obj-$(CONFIG_IEEE80211_SOFTMAC) += softmac/
diff --git a/net/ieee80211/softmac/Kconfig b/net/ieee80211/softmac/Kconfig
deleted file mode 100644
index 2811651cb134..000000000000
--- a/net/ieee80211/softmac/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
1config IEEE80211_SOFTMAC
2 tristate "Software MAC add-on to the IEEE 802.11 networking stack"
3 depends on IEEE80211 && EXPERIMENTAL
4 select WIRELESS_EXT
5 select IEEE80211_CRYPT_WEP
6 ---help---
7 This option enables the hardware independent software MAC addon
8 for the IEEE 802.11 networking stack.
9
10config IEEE80211_SOFTMAC_DEBUG
11 bool "Enable full debugging output"
12 depends on IEEE80211_SOFTMAC
diff --git a/net/ieee80211/softmac/Makefile b/net/ieee80211/softmac/Makefile
deleted file mode 100644
index bfcb391bb2c7..000000000000
--- a/net/ieee80211/softmac/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
1obj-$(CONFIG_IEEE80211_SOFTMAC) += ieee80211softmac.o
2ieee80211softmac-objs := \
3 ieee80211softmac_io.o \
4 ieee80211softmac_auth.o \
5 ieee80211softmac_module.o \
6 ieee80211softmac_scan.o \
7 ieee80211softmac_wx.o \
8 ieee80211softmac_assoc.o \
9 ieee80211softmac_event.o
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
deleted file mode 100644
index c4d122ddd72c..000000000000
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ /dev/null
@@ -1,489 +0,0 @@
1/*
2 * This file contains the softmac's association logic.
3 *
4 * Copyright (c) 2005, 2006 Johannes Berg <johannes@sipsolutions.net>
5 * Joseph Jezak <josejx@gentoo.org>
6 * Larry Finger <Larry.Finger@lwfinger.net>
7 * Danny van Dyk <kugelfang@gentoo.org>
8 * Michael Buesch <mbuesch@freenet.de>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 * The full GNU General Public License is included in this distribution in the
24 * file called COPYING.
25 */
26
27#include "ieee80211softmac_priv.h"
28
29/*
30 * Overview
31 *
32 * Before you can associate, you have to authenticate.
33 *
34 */
35
36/* Sends out an association request to the desired AP */
37static void
38ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net)
39{
40 unsigned long flags;
41
42 /* Switch to correct channel for this network */
43 mac->set_channel(mac->dev, net->channel);
44
45 /* Send association request */
46 ieee80211softmac_send_mgt_frame(mac, net, IEEE80211_STYPE_ASSOC_REQ, 0);
47
48 dprintk(KERN_INFO PFX "sent association request!\n");
49
50 spin_lock_irqsave(&mac->lock, flags);
51 mac->associnfo.associated = 0; /* just to make sure */
52
53 /* Set a timer for timeout */
54 /* FIXME: make timeout configurable */
55 if (likely(mac->running))
56 queue_delayed_work(mac->wq, &mac->associnfo.timeout, 5 * HZ);
57 spin_unlock_irqrestore(&mac->lock, flags);
58}
59
60void
61ieee80211softmac_assoc_timeout(struct work_struct *work)
62{
63 struct ieee80211softmac_device *mac =
64 container_of(work, struct ieee80211softmac_device,
65 associnfo.timeout.work);
66 struct ieee80211softmac_network *n;
67
68 mutex_lock(&mac->associnfo.mutex);
69 /* we might race against ieee80211softmac_handle_assoc_response,
70 * so make sure only one of us does something */
71 if (!mac->associnfo.associating)
72 goto out;
73 mac->associnfo.associating = 0;
74 mac->associnfo.bssvalid = 0;
75 mac->associnfo.associated = 0;
76
77 n = ieee80211softmac_get_network_by_bssid_locked(mac, mac->associnfo.bssid);
78
79 dprintk(KERN_INFO PFX "assoc request timed out!\n");
80 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_TIMEOUT, n);
81out:
82 mutex_unlock(&mac->associnfo.mutex);
83}
84
85void
86ieee80211softmac_disassoc(struct ieee80211softmac_device *mac)
87{
88 unsigned long flags;
89
90 spin_lock_irqsave(&mac->lock, flags);
91 if (mac->associnfo.associating)
92 cancel_delayed_work(&mac->associnfo.timeout);
93
94 netif_carrier_off(mac->dev);
95
96 mac->associnfo.associated = 0;
97 mac->associnfo.bssvalid = 0;
98 mac->associnfo.associating = 0;
99 ieee80211softmac_init_bss(mac);
100 ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_DISASSOCIATED, NULL);
101 spin_unlock_irqrestore(&mac->lock, flags);
102}
103
104/* Sends out a disassociation request to the desired AP */
105void
106ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason)
107{
108 struct ieee80211softmac_network *found;
109
110 if (mac->associnfo.bssvalid && mac->associnfo.associated) {
111 found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid);
112 if (found)
113 ieee80211softmac_send_mgt_frame(mac, found, IEEE80211_STYPE_DISASSOC, reason);
114 }
115
116 ieee80211softmac_disassoc(mac);
117}
118
119static inline int
120we_support_all_basic_rates(struct ieee80211softmac_device *mac, u8 *from, u8 from_len)
121{
122 int idx;
123 u8 rate;
124
125 for (idx = 0; idx < (from_len); idx++) {
126 rate = (from)[idx];
127 if (!(rate & IEEE80211_BASIC_RATE_MASK))
128 continue;
129 rate &= ~IEEE80211_BASIC_RATE_MASK;
130 if (!ieee80211softmac_ratesinfo_rate_supported(&mac->ratesinfo, rate))
131 return 0;
132 }
133 return 1;
134}
135
136static int
137network_matches_request(struct ieee80211softmac_device *mac, struct ieee80211_network *net)
138{
139 /* we cannot associate to networks whose name we don't know */
140 if (ieee80211_is_empty_essid(net->ssid, net->ssid_len))
141 return 0;
142 /* do not associate to a network whose BSSBasicRateSet we cannot support */
143 if (!we_support_all_basic_rates(mac, net->rates, net->rates_len))
144 return 0;
145 /* do we really need to check the ex rates? */
146 if (!we_support_all_basic_rates(mac, net->rates_ex, net->rates_ex_len))
147 return 0;
148
149 /* assume that users know what they're doing ...
150 * (note we don't let them select a net we're incompatible with) */
151 if (mac->associnfo.bssfixed) {
152 return !memcmp(mac->associnfo.bssid, net->bssid, ETH_ALEN);
153 }
154
155 /* if 'ANY' network requested, take any that doesn't have privacy enabled */
156 if (mac->associnfo.req_essid.len == 0
157 && !(net->capability & WLAN_CAPABILITY_PRIVACY))
158 return 1;
159 if (net->ssid_len != mac->associnfo.req_essid.len)
160 return 0;
161 if (!memcmp(net->ssid, mac->associnfo.req_essid.data, mac->associnfo.req_essid.len))
162 return 1;
163 return 0;
164}
165
166static void
167ieee80211softmac_assoc_notify_scan(struct net_device *dev, int event_type, void *context)
168{
169 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
170 ieee80211softmac_assoc_work(&mac->associnfo.work.work);
171}
172
173static void
174ieee80211softmac_assoc_notify_auth(struct net_device *dev, int event_type, void *context)
175{
176 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
177
178 switch (event_type) {
179 case IEEE80211SOFTMAC_EVENT_AUTHENTICATED:
180 ieee80211softmac_assoc_work(&mac->associnfo.work.work);
181 break;
182 case IEEE80211SOFTMAC_EVENT_AUTH_FAILED:
183 case IEEE80211SOFTMAC_EVENT_AUTH_TIMEOUT:
184 ieee80211softmac_disassoc(mac);
185 break;
186 }
187}
188
189/* This function is called to handle userspace requests (asynchronously) */
190void
191ieee80211softmac_assoc_work(struct work_struct *work)
192{
193 struct ieee80211softmac_device *mac =
194 container_of(work, struct ieee80211softmac_device,
195 associnfo.work.work);
196 struct ieee80211softmac_network *found = NULL;
197 struct ieee80211_network *net = NULL, *best = NULL;
198 int bssvalid;
199 unsigned long flags;
200
201 mutex_lock(&mac->associnfo.mutex);
202
203 if (!mac->associnfo.associating)
204 goto out;
205
206 /* ieee80211_disassoc might clear this */
207 bssvalid = mac->associnfo.bssvalid;
208
209 /* meh */
210 if (mac->associnfo.associated)
211 ieee80211softmac_send_disassoc_req(mac, WLAN_REASON_DISASSOC_STA_HAS_LEFT);
212
213 /* try to find the requested network in our list, if we found one already */
214 if (bssvalid || mac->associnfo.bssfixed)
215 found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid);
216
217 /* Search the ieee80211 networks for this network if we didn't find it by bssid,
218 * but only if we've scanned at least once (to get a better list of networks to
219 * select from). If we have not scanned before, the !found logic below will be
220 * invoked and will scan. */
221 if (!found && (mac->associnfo.scan_retry < IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT))
222 {
223 s8 rssi = -128; /* if I don't initialise, gcc emits an invalid warning
224 because it cannot follow the best pointer logic. */
225 spin_lock_irqsave(&mac->ieee->lock, flags);
226 list_for_each_entry(net, &mac->ieee->network_list, list) {
227 /* we're supposed to find the network with
228 * the best signal here, as we're asked to join
229 * any network with a specific ESSID, and many
230 * different ones could have that.
231 *
232 * I'll for now just go with the reported rssi.
233 *
234 * We also should take into account the rateset
235 * here to find the best BSSID to try.
236 */
237 if (network_matches_request(mac, net)) {
238 if (!best) {
239 best = net;
240 rssi = best->stats.rssi;
241 continue;
242 }
243 /* we already had a matching network, so
244 * compare their properties to get the
245 * better of the two ... (see above)
246 */
247 if (rssi < net->stats.rssi) {
248 best = net;
249 rssi = best->stats.rssi;
250 }
251 }
252 }
253 /* if we unlock here, we might get interrupted and the `best'
254 * pointer could go stale */
255 if (best) {
256 found = ieee80211softmac_create_network(mac, best);
257 /* if found is still NULL, then we got -ENOMEM somewhere */
258 if (found)
259 ieee80211softmac_add_network(mac, found);
260 }
261 spin_unlock_irqrestore(&mac->ieee->lock, flags);
262 }
263
264 if (!found) {
265 if (mac->associnfo.scan_retry > 0) {
266 mac->associnfo.scan_retry--;
267
268 /* We know of no such network. Let's scan.
269 * NB: this also happens if we had no memory to copy the network info...
270 * Maybe we can hope to have more memory after scanning finishes ;)
271 */
272 dprintk(KERN_INFO PFX "Associate: Scanning for networks first.\n");
273 ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL);
274 if (ieee80211softmac_start_scan(mac)) {
275 dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n");
276 }
277 goto out;
278 } else {
279 mac->associnfo.associating = 0;
280 mac->associnfo.associated = 0;
281
282 dprintk(KERN_INFO PFX "Unable to find matching network after scan!\n");
283 /* reset the retry counter for the next user request since we
284 * break out and don't reschedule ourselves after this point. */
285 mac->associnfo.scan_retry = IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT;
286 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_NET_NOT_FOUND, NULL);
287 goto out;
288 }
289 }
290
291 /* reset the retry counter for the next user request since we
292 * now found a net and will try to associate to it, but not
293 * schedule this function again. */
294 mac->associnfo.scan_retry = IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT;
295 mac->associnfo.bssvalid = 1;
296 memcpy(mac->associnfo.bssid, found->bssid, ETH_ALEN);
297 /* copy the ESSID for displaying it */
298 mac->associnfo.associate_essid.len = found->essid.len;
299 memcpy(mac->associnfo.associate_essid.data, found->essid.data, IW_ESSID_MAX_SIZE + 1);
300
301 /* we found a network! authenticate (if necessary) and associate to it. */
302 if (found->authenticating) {
303 dprintk(KERN_INFO PFX "Already requested authentication, waiting...\n");
304 if(!mac->associnfo.assoc_wait) {
305 mac->associnfo.assoc_wait = 1;
306 ieee80211softmac_notify_internal(mac, IEEE80211SOFTMAC_EVENT_ANY, found, ieee80211softmac_assoc_notify_auth, NULL, GFP_KERNEL);
307 }
308 goto out;
309 }
310 if (!found->authenticated && !found->authenticating) {
311 /* This relies on the fact that _auth_req only queues the work,
312 * otherwise adding the notification would be racy. */
313 if (!ieee80211softmac_auth_req(mac, found)) {
314 if(!mac->associnfo.assoc_wait) {
315 dprintk(KERN_INFO PFX "Cannot associate without being authenticated, requested authentication\n");
316 mac->associnfo.assoc_wait = 1;
317 ieee80211softmac_notify_internal(mac, IEEE80211SOFTMAC_EVENT_ANY, found, ieee80211softmac_assoc_notify_auth, NULL, GFP_KERNEL);
318 }
319 } else {
320 printkl(KERN_WARNING PFX "Not authenticated, but requesting authentication failed. Giving up to associate\n");
321 mac->associnfo.assoc_wait = 0;
322 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_FAILED, found);
323 }
324 goto out;
325 }
326 /* finally! now we can start associating */
327 mac->associnfo.assoc_wait = 0;
328 ieee80211softmac_assoc(mac, found);
329
330out:
331 mutex_unlock(&mac->associnfo.mutex);
332}
333
334/* call this to do whatever is necessary when we're associated */
335static void
336ieee80211softmac_associated(struct ieee80211softmac_device *mac,
337 struct ieee80211_assoc_response * resp,
338 struct ieee80211softmac_network *net)
339{
340 u16 cap = le16_to_cpu(resp->capability);
341 u8 erp_value = net->erp_value;
342
343 mac->associnfo.associating = 0;
344 mac->bssinfo.supported_rates = net->supported_rates;
345 ieee80211softmac_recalc_txrates(mac);
346
347 mac->associnfo.associated = 1;
348
349 mac->associnfo.short_preamble_available =
350 (cap & WLAN_CAPABILITY_SHORT_PREAMBLE) != 0;
351 ieee80211softmac_process_erp(mac, erp_value);
352
353 if (mac->set_bssid_filter)
354 mac->set_bssid_filter(mac->dev, net->bssid);
355 memcpy(mac->ieee->bssid, net->bssid, ETH_ALEN);
356 netif_carrier_on(mac->dev);
357
358 mac->association_id = le16_to_cpup(&resp->aid);
359}
360
361/* received frame handling functions */
362int
363ieee80211softmac_handle_assoc_response(struct net_device * dev,
364 struct ieee80211_assoc_response * resp,
365 struct ieee80211_network * _ieee80211_network)
366{
367 /* NOTE: the network parameter has to be mostly ignored by
368 * this code because it is the ieee80211's pointer
369 * to the struct, not ours (we made a copy)
370 */
371 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
372 u16 status = le16_to_cpup(&resp->status);
373 struct ieee80211softmac_network *network = NULL;
374 unsigned long flags;
375 DECLARE_MAC_BUF(mac2);
376
377 if (unlikely(!mac->running))
378 return -ENODEV;
379
380 spin_lock_irqsave(&mac->lock, flags);
381
382 if (!mac->associnfo.associating) {
383 /* we race against the timeout function, so make sure
384 * only one of us can do work */
385 spin_unlock_irqrestore(&mac->lock, flags);
386 return 0;
387 }
388 network = ieee80211softmac_get_network_by_bssid_locked(mac, resp->header.addr3);
389
390 /* someone sending us things without us knowing him? Ignore. */
391 if (!network) {
392 dprintk(KERN_INFO PFX "Received unrequested assocation response from %s\n",
393 print_mac(mac2, resp->header.addr3));
394 spin_unlock_irqrestore(&mac->lock, flags);
395 return 0;
396 }
397
398 /* now that we know it was for us, we can cancel the timeout */
399 cancel_delayed_work(&mac->associnfo.timeout);
400
401 /* if the association response included an ERP IE, update our saved
402 * copy */
403 if (_ieee80211_network->flags & NETWORK_HAS_ERP_VALUE)
404 network->erp_value = _ieee80211_network->erp_value;
405
406 switch (status) {
407 case 0:
408 dprintk(KERN_INFO PFX "associated!\n");
409 ieee80211softmac_associated(mac, resp, network);
410 ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATED, network);
411 break;
412 case WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH:
413 if (!network->auth_desynced_once) {
414 /* there seem to be a few rare cases where our view of
415 * the world is obscured, or buggy APs that don't DEAUTH
416 * us properly. So we handle that, but allow it only once.
417 */
418 printkl(KERN_INFO PFX "We were not authenticated during association, retrying...\n");
419 network->authenticated = 0;
420 /* we don't want to do this more than once ... */
421 network->auth_desynced_once = 1;
422 queue_delayed_work(mac->wq, &mac->associnfo.work, 0);
423 break;
424 }
425 default:
426 dprintk(KERN_INFO PFX "associating failed (reason: 0x%x)!\n", status);
427 mac->associnfo.associating = 0;
428 mac->associnfo.bssvalid = 0;
429 mac->associnfo.associated = 0;
430 ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_FAILED, network);
431 }
432
433 spin_unlock_irqrestore(&mac->lock, flags);
434 return 0;
435}
436
437void
438ieee80211softmac_try_reassoc(struct ieee80211softmac_device *mac)
439{
440 unsigned long flags;
441
442 spin_lock_irqsave(&mac->lock, flags);
443 mac->associnfo.associating = 1;
444 queue_delayed_work(mac->wq, &mac->associnfo.work, 0);
445 spin_unlock_irqrestore(&mac->lock, flags);
446}
447
448int
449ieee80211softmac_handle_disassoc(struct net_device * dev,
450 struct ieee80211_disassoc *disassoc)
451{
452 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
453
454 if (unlikely(!mac->running))
455 return -ENODEV;
456
457 if (memcmp(disassoc->header.addr2, mac->associnfo.bssid, ETH_ALEN))
458 return 0;
459
460 if (memcmp(disassoc->header.addr1, mac->dev->dev_addr, ETH_ALEN))
461 return 0;
462
463 dprintk(KERN_INFO PFX "got disassoc frame\n");
464 ieee80211softmac_disassoc(mac);
465
466 ieee80211softmac_try_reassoc(mac);
467
468 return 0;
469}
470
471int
472ieee80211softmac_handle_reassoc_req(struct net_device * dev,
473 struct ieee80211_reassoc_request * resp)
474{
475 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
476 struct ieee80211softmac_network *network;
477
478 if (unlikely(!mac->running))
479 return -ENODEV;
480
481 network = ieee80211softmac_get_network_by_bssid(mac, resp->header.addr3);
482 if (!network) {
483 dprintkl(KERN_INFO PFX "reassoc request from unknown network\n");
484 return 0;
485 }
486 queue_delayed_work(mac->wq, &mac->associnfo.work, 0);
487
488 return 0;
489}
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
deleted file mode 100644
index 1a96c2572578..000000000000
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ /dev/null
@@ -1,413 +0,0 @@
1/*
2 * This file contains the softmac's authentication logic.
3 *
4 * Copyright (c) 2005, 2006 Johannes Berg <johannes@sipsolutions.net>
5 * Joseph Jezak <josejx@gentoo.org>
6 * Larry Finger <Larry.Finger@lwfinger.net>
7 * Danny van Dyk <kugelfang@gentoo.org>
8 * Michael Buesch <mbuesch@freenet.de>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 * The full GNU General Public License is included in this distribution in the
24 * file called COPYING.
25 */
26
27#include "ieee80211softmac_priv.h"
28
29static void ieee80211softmac_auth_queue(struct work_struct *work);
30
31/* Queues an auth request to the desired AP */
32int
33ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
34 struct ieee80211softmac_network *net)
35{
36 struct ieee80211softmac_auth_queue_item *auth;
37 unsigned long flags;
38 DECLARE_MAC_BUF(mac2);
39
40 if (net->authenticating || net->authenticated)
41 return 0;
42 net->authenticating = 1;
43
44 /* Add the network if it's not already added */
45 ieee80211softmac_add_network(mac, net);
46
47 dprintk(KERN_NOTICE PFX "Queueing Authentication Request to %s\n", print_mac(mac2, net->bssid));
48 /* Queue the auth request */
49 auth = (struct ieee80211softmac_auth_queue_item *)
50 kmalloc(sizeof(struct ieee80211softmac_auth_queue_item), GFP_KERNEL);
51 if(auth == NULL)
52 return -ENOMEM;
53
54 auth->net = net;
55 auth->mac = mac;
56 auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT;
57 auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST;
58 INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue);
59
60 /* Lock (for list) */
61 spin_lock_irqsave(&mac->lock, flags);
62
63 /* add to list */
64 list_add_tail(&auth->list, &mac->auth_queue);
65 queue_delayed_work(mac->wq, &auth->work, 0);
66 spin_unlock_irqrestore(&mac->lock, flags);
67
68 return 0;
69}
70
71
/* Sends an auth request to the desired AP and handles timeouts.
 *
 * Delayed-work handler for an auth queue item.  While retries remain it
 * (re)sends the auth frame and requeues itself as a timeout; when the
 * retry budget is exhausted it reports AUTH_TIMEOUT, unlinks the item
 * and frees it.  NOTE(review): frame TX happens outside mac->lock on
 * purpose — the send path may sleep; only flag/list updates are locked. */
static void
ieee80211softmac_auth_queue(struct work_struct *work)
{
    struct ieee80211softmac_device *mac;
    struct ieee80211softmac_auth_queue_item *auth;
    struct ieee80211softmac_network *net;
    unsigned long flags;
    DECLARE_MAC_BUF(mac2);

    auth = container_of(work, struct ieee80211softmac_auth_queue_item,
                        work.work);
    net = auth->net;
    mac = auth->mac;

    if(auth->retry > 0) {
        /* Switch to correct channel for this network */
        mac->set_channel(mac->dev, net->channel);

        /* Lock and set flags */
        spin_lock_irqsave(&mac->lock, flags);
        if (unlikely(!mac->running)) {
            /* Prevent reschedule on workqueue flush */
            spin_unlock_irqrestore(&mac->lock, flags);
            return;
        }
        net->authenticated = 0;
        /* add a timeout call so we eventually give up waiting for an auth reply */
        queue_delayed_work(mac->wq, &auth->work, IEEE80211SOFTMAC_AUTH_TIMEOUT);
        auth->retry--;
        spin_unlock_irqrestore(&mac->lock, flags);
        /* A send failure is not handled here: the timeout requeued
         * above will fire and retry (or eventually give up). */
        if (ieee80211softmac_send_mgt_frame(mac, auth->net, IEEE80211_STYPE_AUTH, auth->state))
            dprintk(KERN_NOTICE PFX "Sending Authentication Request to %s failed (this shouldn't happen, wait for the timeout).\n",
                    print_mac(mac2, net->bssid));
        else
            dprintk(KERN_NOTICE PFX "Sent Authentication Request to %s.\n", print_mac(mac2, net->bssid));
        return;
    }

    /* Out of retries: report the timeout and destroy the queue item. */
    printkl(KERN_WARNING PFX "Authentication timed out with %s\n", print_mac(mac2, net->bssid));
    /* Remove this item from the queue */
    spin_lock_irqsave(&mac->lock, flags);
    net->authenticating = 0;
    ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_AUTH_TIMEOUT, net);
    cancel_delayed_work(&auth->work); /* just to make sure... */
    list_del(&auth->list);
    spin_unlock_irqrestore(&mac->lock, flags);
    /* Free it */
    kfree(auth);
}
122
123/* Sends a response to an auth challenge (for shared key auth). */
124static void
125ieee80211softmac_auth_challenge_response(struct work_struct *work)
126{
127 struct ieee80211softmac_auth_queue_item *aq =
128 container_of(work, struct ieee80211softmac_auth_queue_item,
129 work.work);
130
131 /* Send our response */
132 ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
133}
134
135/* Handle the auth response from the AP
136 * This should be registered with ieee80211 as handle_auth
137 */
138int
139ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
140{
141
142 struct list_head *list_ptr;
143 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
144 struct ieee80211softmac_auth_queue_item *aq = NULL;
145 struct ieee80211softmac_network *net = NULL;
146 unsigned long flags;
147 u8 * data;
148 DECLARE_MAC_BUF(mac2);
149
150 if (unlikely(!mac->running))
151 return -ENODEV;
152
153 /* Find correct auth queue item */
154 spin_lock_irqsave(&mac->lock, flags);
155 list_for_each(list_ptr, &mac->auth_queue) {
156 aq = list_entry(list_ptr, struct ieee80211softmac_auth_queue_item, list);
157 net = aq->net;
158 if (!memcmp(net->bssid, auth->header.addr2, ETH_ALEN))
159 break;
160 else
161 aq = NULL;
162 }
163 spin_unlock_irqrestore(&mac->lock, flags);
164
165 /* Make sure that we've got an auth queue item for this request */
166 if(aq == NULL)
167 {
168 dprintkl(KERN_DEBUG PFX "Authentication response received from %s but no queue item exists.\n", print_mac(mac2, auth->header.addr2));
169 /* Error #? */
170 return -1;
171 }
172
173 /* Check for out of order authentication */
174 if(!net->authenticating)
175 {
176 dprintkl(KERN_DEBUG PFX "Authentication response received from %s but did not request authentication.\n",print_mac(mac2, auth->header.addr2));
177 return -1;
178 }
179
180 /* Parse the auth packet */
181 switch(le16_to_cpu(auth->algorithm)) {
182 case WLAN_AUTH_OPEN:
183 /* Check the status code of the response */
184
185 switch(le16_to_cpu(auth->status)) {
186 case WLAN_STATUS_SUCCESS:
187 /* Update the status to Authenticated */
188 spin_lock_irqsave(&mac->lock, flags);
189 net->authenticating = 0;
190 net->authenticated = 1;
191 spin_unlock_irqrestore(&mac->lock, flags);
192
193 /* Send event */
194 printkl(KERN_NOTICE PFX "Open Authentication completed with %s\n", print_mac(mac2, net->bssid));
195 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_AUTHENTICATED, net);
196 break;
197 default:
198 /* Lock and reset flags */
199 spin_lock_irqsave(&mac->lock, flags);
200 net->authenticated = 0;
201 net->authenticating = 0;
202 spin_unlock_irqrestore(&mac->lock, flags);
203
204 printkl(KERN_NOTICE PFX "Open Authentication with %s failed, error code: %i\n",
205 print_mac(mac2, net->bssid), le16_to_cpup(&auth->status));
206 /* Count the error? */
207 break;
208 }
209 goto free_aq;
210 break;
211 case WLAN_AUTH_SHARED_KEY:
212 /* Figure out where we are in the process */
213 switch(le16_to_cpu(auth->transaction)) {
214 case IEEE80211SOFTMAC_AUTH_SHARED_CHALLENGE:
215 /* Check to make sure we have a challenge IE */
216 data = (u8 *)auth->info_element;
217 if (*data++ != MFIE_TYPE_CHALLENGE) {
218 printkl(KERN_NOTICE PFX "Shared Key Authentication failed due to a missing challenge.\n");
219 break;
220 }
221 /* Save the challenge */
222 spin_lock_irqsave(&mac->lock, flags);
223 net->challenge_len = *data++;
224 if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN)
225 net->challenge_len = WLAN_AUTH_CHALLENGE_LEN;
226 kfree(net->challenge);
227 net->challenge = kmemdup(data, net->challenge_len,
228 GFP_ATOMIC);
229 if (net->challenge == NULL) {
230 printkl(KERN_NOTICE PFX "Shared Key "
231 "Authentication failed due to "
232 "memory shortage.\n");
233 spin_unlock_irqrestore(&mac->lock, flags);
234 break;
235 }
236 aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE;
237
238 /* We reuse the work struct from the auth request here.
239 * It is safe to do so as each one is per-request, and
240 * at this point (dealing with authentication response)
241 * we have obviously already sent the initial auth
242 * request. */
243 cancel_delayed_work(&aq->work);
244 INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response);
245 queue_delayed_work(mac->wq, &aq->work, 0);
246 spin_unlock_irqrestore(&mac->lock, flags);
247 return 0;
248 case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
249 kfree(net->challenge);
250 net->challenge = NULL;
251 net->challenge_len = 0;
252 /* Check the status code of the response */
253 switch(auth->status) {
254 case WLAN_STATUS_SUCCESS:
255 /* Update the status to Authenticated */
256 spin_lock_irqsave(&mac->lock, flags);
257 net->authenticating = 0;
258 net->authenticated = 1;
259 spin_unlock_irqrestore(&mac->lock, flags);
260 printkl(KERN_NOTICE PFX "Shared Key Authentication completed with %s\n",
261 print_mac(mac2, net->bssid));
262 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_AUTHENTICATED, net);
263 break;
264 default:
265 printkl(KERN_NOTICE PFX "Shared Key Authentication with %s failed, error code: %i\n",
266 print_mac(mac2, net->bssid), le16_to_cpup(&auth->status));
267 /* Lock and reset flags */
268 spin_lock_irqsave(&mac->lock, flags);
269 net->authenticating = 0;
270 net->authenticated = 0;
271 spin_unlock_irqrestore(&mac->lock, flags);
272 /* Count the error? */
273 break;
274 }
275 goto free_aq;
276 break;
277 default:
278 printkl(KERN_WARNING PFX "Unhandled Authentication Step: %i\n", auth->transaction);
279 break;
280 }
281 goto free_aq;
282 break;
283 default:
284 /* ERROR */
285 goto free_aq;
286 break;
287 }
288 return 0;
289free_aq:
290 /* Cancel the timeout */
291 spin_lock_irqsave(&mac->lock, flags);
292 cancel_delayed_work(&aq->work);
293 /* Remove this item from the queue */
294 list_del(&aq->list);
295 spin_unlock_irqrestore(&mac->lock, flags);
296
297 /* Free it */
298 kfree(aq);
299 return 0;
300}
301
302/*
303 * Handle deauthorization
304 */
305static void
306ieee80211softmac_deauth_from_net(struct ieee80211softmac_device *mac,
307 struct ieee80211softmac_network *net)
308{
309 struct ieee80211softmac_auth_queue_item *aq = NULL;
310 struct list_head *list_ptr;
311 unsigned long flags;
312
313 /* deauthentication implies disassociation */
314 ieee80211softmac_disassoc(mac);
315
316 /* Lock and reset status flags */
317 spin_lock_irqsave(&mac->lock, flags);
318 net->authenticating = 0;
319 net->authenticated = 0;
320
321 /* Find correct auth queue item, if it exists */
322 list_for_each(list_ptr, &mac->auth_queue) {
323 aq = list_entry(list_ptr, struct ieee80211softmac_auth_queue_item, list);
324 if (!memcmp(net->bssid, aq->net->bssid, ETH_ALEN))
325 break;
326 else
327 aq = NULL;
328 }
329
330 /* Cancel pending work */
331 if(aq != NULL)
332 /* Not entirely safe? What about running work? */
333 cancel_delayed_work(&aq->work);
334
335 /* Free our network ref */
336 ieee80211softmac_del_network_locked(mac, net);
337 if(net->challenge != NULL)
338 kfree(net->challenge);
339 kfree(net);
340
341 /* can't transmit data right now... */
342 netif_carrier_off(mac->dev);
343 spin_unlock_irqrestore(&mac->lock, flags);
344
345 ieee80211softmac_try_reassoc(mac);
346}
347
348/*
349 * Sends a deauth request to the desired AP
350 */
351int
352ieee80211softmac_deauth_req(struct ieee80211softmac_device *mac,
353 struct ieee80211softmac_network *net, int reason)
354{
355 int ret;
356
357 /* Make sure the network is authenticated */
358 if (!net->authenticated)
359 {
360 dprintkl(KERN_DEBUG PFX "Can't send deauthentication packet, network is not authenticated.\n");
361 /* Error okay? */
362 return -EPERM;
363 }
364
365 /* Send the de-auth packet */
366 if((ret = ieee80211softmac_send_mgt_frame(mac, net, IEEE80211_STYPE_DEAUTH, reason)))
367 return ret;
368
369 ieee80211softmac_deauth_from_net(mac, net);
370 return 0;
371}
372
373/*
374 * This should be registered with ieee80211 as handle_deauth
375 */
376int
377ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth)
378{
379
380 struct ieee80211softmac_network *net = NULL;
381 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
382 DECLARE_MAC_BUF(mac2);
383
384 if (unlikely(!mac->running))
385 return -ENODEV;
386
387 if (!deauth) {
388 dprintk("deauth without deauth packet. eek!\n");
389 return 0;
390 }
391
392 net = ieee80211softmac_get_network_by_bssid(mac, deauth->header.addr2);
393
394 if (net == NULL) {
395 dprintkl(KERN_DEBUG PFX "Received deauthentication packet from %s, but that network is unknown.\n",
396 print_mac(mac2, deauth->header.addr2));
397 return 0;
398 }
399
400 /* Make sure the network is authenticated */
401 if(!net->authenticated)
402 {
403 dprintkl(KERN_DEBUG PFX "Can't perform deauthentication, network is not authenticated.\n");
404 /* Error okay? */
405 return -EPERM;
406 }
407
408 ieee80211softmac_deauth_from_net(mac, net);
409
410 /* let's try to re-associate */
411 queue_delayed_work(mac->wq, &mac->associnfo.work, 0);
412 return 0;
413}
diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c
deleted file mode 100644
index 8cef05b60f16..000000000000
--- a/net/ieee80211/softmac/ieee80211softmac_event.c
+++ /dev/null
@@ -1,189 +0,0 @@
1/*
2 * Event system
3 * Also see comments in public header file and longer explanation below.
4 *
5 * Copyright (c) 2005, 2006 Johannes Berg <johannes@sipsolutions.net>
6 * Joseph Jezak <josejx@gentoo.org>
7 * Larry Finger <Larry.Finger@lwfinger.net>
8 * Danny van Dyk <kugelfang@gentoo.org>
9 * Michael Buesch <mbuesch@freenet.de>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 *
24 * The full GNU General Public License is included in this distribution in the
25 * file called COPYING.
26 */
27
28#include "ieee80211softmac_priv.h"
29
30/*
31 * Each event has associated to it
32 * - an event type (see constants in public header)
33 * - an event context (see below)
34 * - the function to be called
35 * - a context (extra parameter to call the function with)
36 * - and the softmac struct
37 *
38 * The event context is private and can only be used from
39 * within this module. Its meaning varies with the event
40 * type:
41 * SCAN_FINISHED,
42 * DISASSOCIATED: NULL
43 * ASSOCIATED,
44 * ASSOCIATE_FAILED,
45 * ASSOCIATE_TIMEOUT,
46 * AUTHENTICATED,
47 * AUTH_FAILED,
48 * AUTH_TIMEOUT: a pointer to the network struct
49 * ...
50 * Code within this module can use the event context to be only
51 * called when the event is true for that specific context
52 * as per above table.
53 * If the event context is NULL, then the notification is always called,
54 * regardless of the event context. The event context is not passed to
55 * the callback, it is assumed that the context suffices.
56 *
57 * You can also use the event context only by setting the event type
58 * to -1 (private use only), in which case you'll be notified
59 * whenever the event context matches.
60 */
61
/* Human-readable texts sent to userspace as IWEVCUSTOM events,
 * indexed by IEEE80211SOFTMAC_EVENT_* value.  NULL entries are events
 * that are reported via dedicated wireless events (SIOCGIWAP /
 * SIOCGIWSCAN) instead of a custom text — see
 * ieee80211softmac_call_events_locked(). */
static char *event_descriptions[IEEE80211SOFTMAC_EVENT_LAST+1] = {
    NULL, /* scan finished */
    NULL, /* associated */
    "associating failed",
    "associating timed out",
    "authenticated",
    "authenticating failed",
    "authenticating timed out",
    "associating failed because no suitable network was found",
    NULL, /* disassociated */
};
73
74
75static void
76ieee80211softmac_notify_callback(struct work_struct *work)
77{
78 struct ieee80211softmac_event *pevent =
79 container_of(work, struct ieee80211softmac_event, work.work);
80 struct ieee80211softmac_event event = *pevent;
81 kfree(pevent);
82
83 event.fun(event.mac->dev, event.event_type, event.context);
84}
85
86int
87ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac,
88 int event, void *event_context, notify_function_ptr fun, void *context, gfp_t gfp_mask)
89{
90 struct ieee80211softmac_event *eventptr;
91 unsigned long flags;
92
93 if (event < -1 || event > IEEE80211SOFTMAC_EVENT_LAST)
94 return -ENOSYS;
95
96 if (!fun)
97 return -EINVAL;
98
99 eventptr = kmalloc(sizeof(struct ieee80211softmac_event), gfp_mask);
100 if (!eventptr)
101 return -ENOMEM;
102
103 eventptr->event_type = event;
104 INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback);
105 eventptr->fun = fun;
106 eventptr->context = context;
107 eventptr->mac = mac;
108 eventptr->event_context = event_context;
109
110 spin_lock_irqsave(&mac->lock, flags);
111 list_add(&eventptr->list, &mac->events);
112 spin_unlock_irqrestore(&mac->lock, flags);
113
114 return 0;
115}
116
117int
118ieee80211softmac_notify_gfp(struct net_device *dev,
119 int event, notify_function_ptr fun, void *context, gfp_t gfp_mask)
120{
121 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
122
123 if (event < 0 || event > IEEE80211SOFTMAC_EVENT_LAST)
124 return -ENOSYS;
125
126 return ieee80211softmac_notify_internal(mac, event, NULL, fun, context, gfp_mask);
127}
128EXPORT_SYMBOL_GPL(ieee80211softmac_notify_gfp);
129
/* private -- calling all callbacks that were specified
 *
 * Fires @event: first reports it to userspace via the wireless-event
 * interface (for non-negative events), then dispatches all matching
 * registered notifications by queueing their work items.
 * Caller must hold mac->lock. */
void
ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int event, void *event_ctx)
{
    struct ieee80211softmac_event *eventptr, *tmp;
    struct ieee80211softmac_network *network;

    /* event < 0 is a private "context only" notification; nothing is
     * reported to userspace for it. */
    if (event >= 0) {
        union iwreq_data wrqu;
        int we_event;
        char *msg = NULL;

        memset(&wrqu, '\0', sizeof (union iwreq_data));

        switch(event) {
        case IEEE80211SOFTMAC_EVENT_ASSOCIATED:
            network = (struct ieee80211softmac_network *)event_ctx;
            memcpy(wrqu.ap_addr.sa_data, &network->bssid[0], ETH_ALEN);
            /* fall through */
        case IEEE80211SOFTMAC_EVENT_DISASSOCIATED:
            /* (dis)association is reported as SIOCGIWAP; a zeroed
             * BSSID means "disassociated" */
            wrqu.ap_addr.sa_family = ARPHRD_ETHER;
            we_event = SIOCGIWAP;
            break;
        case IEEE80211SOFTMAC_EVENT_SCAN_FINISHED:
            we_event = SIOCGIWSCAN;
            break;
        default:
            /* everything else becomes an IWEVCUSTOM text event */
            msg = event_descriptions[event];
            if (!msg)
                msg = "SOFTMAC EVENT BUG";
            wrqu.data.length = strlen(msg);
            we_event = IWEVCUSTOM;
            break;
        }
        wireless_send_event(mac->dev, we_event, &wrqu, msg);
    }

    /* Dispatch matching registered notifications.  A registration
     * matches on event type (-1 = any) and, if it supplied one, on
     * the event context.  Matched entries are one-shot: unlink and
     * queue their callback work. */
    if (!list_empty(&mac->events))
        list_for_each_entry_safe(eventptr, tmp, &mac->events, list) {
            if ((eventptr->event_type == event || eventptr->event_type == -1)
                && (eventptr->event_context == NULL || eventptr->event_context == event_ctx)) {
                list_del(&eventptr->list);
                /* User may have subscribed to ANY event, so
                 * we tell them which event triggered it. */
                eventptr->event_type = event;
                queue_delayed_work(mac->wq, &eventptr->work, 0);
            }
        }
}
179
180void
181ieee80211softmac_call_events(struct ieee80211softmac_device *mac, int event, void *event_ctx)
182{
183 unsigned long flags;
184
185 spin_lock_irqsave(&mac->lock, flags);
186 ieee80211softmac_call_events_locked(mac, event, event_ctx);
187
188 spin_unlock_irqrestore(&mac->lock, flags);
189}
diff --git a/net/ieee80211/softmac/ieee80211softmac_io.c b/net/ieee80211/softmac/ieee80211softmac_io.c
deleted file mode 100644
index 73b4b13fbd8f..000000000000
--- a/net/ieee80211/softmac/ieee80211softmac_io.c
+++ /dev/null
@@ -1,488 +0,0 @@
1/*
2 * Some parts based on code from net80211
3 * Copyright (c) 2001 Atsushi Onoe
4 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * Alternatively, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") version 2 as published by the Free
20 * Software Foundation.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#include "ieee80211softmac_priv.h"
36
37/* Helper functions for inserting data into the frames */
38
39/*
40 * Adds an ESSID element to the frame
41 *
42 */
43static u8 *
44ieee80211softmac_add_essid(u8 *dst, struct ieee80211softmac_essid *essid)
45{
46 if (essid) {
47 *dst++ = MFIE_TYPE_SSID;
48 *dst++ = essid->len;
49 memcpy(dst, essid->data, essid->len);
50 return dst+essid->len;
51 } else {
52 *dst++ = MFIE_TYPE_SSID;
53 *dst++ = 0;
54 return dst;
55 }
56}
57
58/* Adds Supported Rates and if required Extended Rates Information Element
59 * to the frame, ASSUMES WE HAVE A SORTED LIST OF RATES */
60static u8 *
61ieee80211softmac_frame_add_rates(u8 *dst, const struct ieee80211softmac_ratesinfo *r)
62{
63 int cck_len, ofdm_len;
64 *dst++ = MFIE_TYPE_RATES;
65
66 for(cck_len=0; ieee80211_is_cck_rate(r->rates[cck_len]) && (cck_len < r->count);cck_len++);
67
68 if(cck_len > IEEE80211SOFTMAC_MAX_RATES_LEN)
69 cck_len = IEEE80211SOFTMAC_MAX_RATES_LEN;
70 *dst++ = cck_len;
71 memcpy(dst, r->rates, cck_len);
72 dst += cck_len;
73
74 if(cck_len < r->count){
75 for (ofdm_len=0; ieee80211_is_ofdm_rate(r->rates[ofdm_len + cck_len]) && (ofdm_len + cck_len < r->count); ofdm_len++);
76 if (ofdm_len > 0) {
77 if (ofdm_len > IEEE80211SOFTMAC_MAX_EX_RATES_LEN)
78 ofdm_len = IEEE80211SOFTMAC_MAX_EX_RATES_LEN;
79 *dst++ = MFIE_TYPE_RATES_EX;
80 *dst++ = ofdm_len;
81 memcpy(dst, r->rates + cck_len, ofdm_len);
82 dst += ofdm_len;
83 }
84 }
85 return dst;
86}
87
88/* Allocate a management frame */
89static u8 *
90ieee80211softmac_alloc_mgt(u32 size)
91{
92 u8 * data;
93
94 /* Add the header and FCS to the size */
95 size = size + IEEE80211_3ADDR_LEN;
96 if(size > IEEE80211_DATA_LEN)
97 return NULL;
98 /* Allocate the frame */
99 data = kzalloc(size, GFP_ATOMIC);
100 return data;
101}
102
103/*
104 * Add a 2 Address Header
105 */
106static void
107ieee80211softmac_hdr_2addr(struct ieee80211softmac_device *mac,
108 struct ieee80211_hdr_2addr *header, u32 type, u8 *dest)
109{
110 /* Fill in the frame control flags */
111 header->frame_ctl = cpu_to_le16(type);
112 /* Control packets always have WEP turned off */
113 if(type > IEEE80211_STYPE_CFENDACK && type < IEEE80211_STYPE_PSPOLL)
114 header->frame_ctl |= mac->ieee->sec.level ? cpu_to_le16(IEEE80211_FCTL_PROTECTED) : 0;
115
116 /* Fill in the duration */
117 header->duration_id = 0;
118 /* FIXME: How do I find this?
119 * calculate. But most drivers just fill in 0 (except if it's a station id of course) */
120
121 /* Fill in the Destination Address */
122 if(dest == NULL)
123 memset(header->addr1, 0xFF, ETH_ALEN);
124 else
125 memcpy(header->addr1, dest, ETH_ALEN);
126 /* Fill in the Source Address */
127 memcpy(header->addr2, mac->ieee->dev->dev_addr, ETH_ALEN);
128
129}
130
131
132/* Add a 3 Address Header */
133static void
134ieee80211softmac_hdr_3addr(struct ieee80211softmac_device *mac,
135 struct ieee80211_hdr_3addr *header, u32 type, u8 *dest, u8 *bssid)
136{
137 /* This is common with 2addr, so use that instead */
138 ieee80211softmac_hdr_2addr(mac, (struct ieee80211_hdr_2addr *)header, type, dest);
139
140 /* Fill in the BSS ID */
141 if(bssid == NULL)
142 memset(header->addr3, 0xFF, ETH_ALEN);
143 else
144 memcpy(header->addr3, bssid, ETH_ALEN);
145
146 /* Fill in the sequence # */
147 /* FIXME: I need to add this to the softmac struct
148 * shouldn't the sequence number be in ieee80211? */
149}
150
151static __le16
152ieee80211softmac_capabilities(struct ieee80211softmac_device *mac,
153 struct ieee80211softmac_network *net)
154{
155 __le16 capability = 0;
156
157 /* ESS and IBSS bits are set according to the current mode */
158 switch (mac->ieee->iw_mode) {
159 case IW_MODE_INFRA:
160 capability = cpu_to_le16(WLAN_CAPABILITY_ESS);
161 break;
162 case IW_MODE_ADHOC:
163 capability = cpu_to_le16(WLAN_CAPABILITY_IBSS);
164 break;
165 case IW_MODE_AUTO:
166 capability = cpu_to_le16(net->capabilities &
167 (WLAN_CAPABILITY_ESS|WLAN_CAPABILITY_IBSS));
168 break;
169 default:
170 /* bleh. we don't ever go to these modes */
171 printk(KERN_ERR PFX "invalid iw_mode!\n");
172 break;
173 }
174
175 /* CF Pollable / CF Poll Request */
176 /* Needs to be implemented, for now, the 0's == not supported */
177
178 /* Privacy Bit */
179 capability |= mac->ieee->sec.level ?
180 cpu_to_le16(WLAN_CAPABILITY_PRIVACY) : 0;
181
182 /* Short Preamble */
183 /* Always supported: we probably won't ever be powering devices which
184 * dont support this... */
185 capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
186
187 /* PBCC */
188 /* Not widely used */
189
190 /* Channel Agility */
191 /* Not widely used */
192
193 /* Short Slot */
194 /* Will be implemented later */
195
196 /* DSSS-OFDM */
197 /* Not widely used */
198
199 return capability;
200}
201
202/*****************************************************************************
203 * Create Management packets
204 *****************************************************************************/
205
/* Creates an association request packet.
 *
 * Allocates *pkt sized for the worst-case IEs, fills the 3-address
 * header, capabilities, listen interval, then appends SSID, rates and
 * (if configured) the WPA IE.  Returns the number of bytes actually
 * used, or 0 if allocation failed (*pkt is then NULL).
 * Caller owns the returned buffer. */
static u32
ieee80211softmac_assoc_req(struct ieee80211_assoc_request **pkt,
    struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net)
{
    u8 *data;
    /* Worst-case size budget; each "1 + 1" is an IE's id + length octet */
    (*pkt) = (struct ieee80211_assoc_request *)ieee80211softmac_alloc_mgt(
        2 +              /* Capability Info */
        2 +              /* Listen Interval */
        /* SSID IE */
        1 + 1 + IW_ESSID_MAX_SIZE +
        /* Rates IE */
        1 + 1 + IEEE80211SOFTMAC_MAX_RATES_LEN +
        /* Extended Rates IE */
        1 + 1 + IEEE80211SOFTMAC_MAX_EX_RATES_LEN +
        /* WPA IE if present */
        mac->wpa.IElen
        /* Other IE's? Optional?
         * Yeah, probably need an extra IE parameter -- lots of vendors like to
         * fill in their own IEs */
        );
    if (unlikely((*pkt) == NULL))
        return 0;
    ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_ASSOC_REQ, net->bssid, net->bssid);

    /* Fill in the capabilities */
    (*pkt)->capability = ieee80211softmac_capabilities(mac, net);

    /* Fill in Listen Interval (?) */
    (*pkt)->listen_interval = cpu_to_le16(10);

    data = (u8 *)(*pkt)->info_element;
    /* Add SSID */
    data = ieee80211softmac_add_essid(data, &net->essid);
    /* Add Rates */
    data = ieee80211softmac_frame_add_rates(data, &mac->ratesinfo);
    /* Add WPA IE */
    if (mac->wpa.IElen && mac->wpa.IE) {
        memcpy(data, mac->wpa.IE, mac->wpa.IElen);
        data += mac->wpa.IElen;
    }
    /* Return the number of used bytes */
    return (data - (u8*)(*pkt));
}
250
/* Create a reassociation request packet.
 *
 * Like the assoc request, plus the current AP's MAC address; no WPA IE
 * is appended here.  Returns the number of bytes used, or 0 if
 * allocation failed (*pkt is then NULL). */
static u32
ieee80211softmac_reassoc_req(struct ieee80211_reassoc_request **pkt,
    struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net)
{
    u8 *data;
    /* Worst-case size budget; each "1 + 1" is an IE's id + length octet */
    (*pkt) = (struct ieee80211_reassoc_request *)ieee80211softmac_alloc_mgt(
        2 +              /* Capability Info */
        2 +              /* Listen Interval */
        ETH_ALEN +       /* AP MAC */
        /* SSID IE */
        1 + 1 + IW_ESSID_MAX_SIZE +
        /* Rates IE */
        1 + 1 + IEEE80211SOFTMAC_MAX_RATES_LEN +
        /* Extended Rates IE */
        1 + 1 + IEEE80211SOFTMAC_MAX_EX_RATES_LEN
        /* Other IE's? */
        );
    if (unlikely((*pkt) == NULL))
        return 0;
    ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_REASSOC_REQ, net->bssid, net->bssid);

    /* Fill in the capabilities */
    (*pkt)->capability = ieee80211softmac_capabilities(mac, net);

    /* Fill in Listen Interval (?) */
    (*pkt)->listen_interval = cpu_to_le16(10);
    /* Fill in the current AP MAC */
    memcpy((*pkt)->current_ap, mac->ieee->bssid, ETH_ALEN);

    data = (u8 *)(*pkt)->info_element;
    /* Add SSID */
    data = ieee80211softmac_add_essid(data, &net->essid);
    /* Add Rates */
    data = ieee80211softmac_frame_add_rates(data, &mac->ratesinfo);
    /* Return packet size */
    return (data - (u8 *)(*pkt));
}
289
/* Create an authentication packet.
 *
 * Builds an AUTH frame for the given @transaction step and @status.
 * For the shared-key response step the saved challenge text is
 * appended and *encrypt_mpdu is set to 1 so the caller encrypts the
 * frame with the shared key; otherwise *encrypt_mpdu is 0.
 * Returns the number of bytes used, or 0 on allocation failure. */
static u32
ieee80211softmac_auth(struct ieee80211_auth **pkt,
    struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net,
    u16 transaction, u16 status, int *encrypt_mpdu)
{
    u8 *data;
    int auth_mode = mac->ieee->sec.auth_mode;
    /* Only the shared-key *response* step carries a challenge IE */
    int is_shared_response = (auth_mode == WLAN_AUTH_SHARED_KEY
        && transaction == IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE);

    /* Allocate Packet */
    (*pkt) = (struct ieee80211_auth *)ieee80211softmac_alloc_mgt(
        2 +              /* Auth Algorithm */
        2 +              /* Auth Transaction Seq */
        2 +              /* Status Code */
        /* Challenge Text IE */
        (is_shared_response ? 1 + 1 + net->challenge_len : 0)
        );
    if (unlikely((*pkt) == NULL))
        return 0;
    ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_AUTH, net->bssid, net->bssid);

    /* Algorithm */
    (*pkt)->algorithm = cpu_to_le16(auth_mode);
    /* Transaction */
    (*pkt)->transaction = cpu_to_le16(transaction);
    /* Status */
    (*pkt)->status = cpu_to_le16(status);

    data = (u8 *)(*pkt)->info_element;
    /* Challenge Text */
    if (is_shared_response) {
        *data = MFIE_TYPE_CHALLENGE;
        data++;

        /* Copy the challenge in */
        *data = net->challenge_len;
        data++;
        memcpy(data, net->challenge, net->challenge_len);
        data += net->challenge_len;

        /* Make sure this frame gets encrypted with the shared key */
        *encrypt_mpdu = 1;
    } else
        *encrypt_mpdu = 0;

    /* Return the packet size */
    return (data - (u8 *)(*pkt));
}
340
341/* Create a disassocation or deauthentication packet */
342static u32
343ieee80211softmac_disassoc_deauth(struct ieee80211_disassoc **pkt,
344 struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net,
345 u16 type, u16 reason)
346{
347 /* Allocate Packet */
348 (*pkt) = (struct ieee80211_disassoc *)ieee80211softmac_alloc_mgt(2);
349 if (unlikely((*pkt) == NULL))
350 return 0;
351 ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), type, net->bssid, net->bssid);
352 /* Reason */
353 (*pkt)->reason = cpu_to_le16(reason);
354 /* Return the packet size */
355 return (2 + IEEE80211_3ADDR_LEN);
356}
357
/* Create a probe request packet.
 *
 * @essid may be NULL for a broadcast probe (empty SSID IE); the frame
 * is addressed to the broadcast address.  Returns the number of bytes
 * used, or 0 on allocation failure (*pkt is then NULL). */
static u32
ieee80211softmac_probe_req(struct ieee80211_probe_request **pkt,
    struct ieee80211softmac_device *mac, struct ieee80211softmac_essid *essid)
{
    u8 *data;
    /* Allocate Packet; each "1 + 1" is an IE's id + length octet */
    (*pkt) = (struct ieee80211_probe_request *)ieee80211softmac_alloc_mgt(
        /* SSID of requested network */
        1 + 1 + IW_ESSID_MAX_SIZE +
        /* Rates IE */
        1 + 1 + IEEE80211SOFTMAC_MAX_RATES_LEN +
        /* Extended Rates IE */
        1 + 1 + IEEE80211SOFTMAC_MAX_EX_RATES_LEN
        );
    if (unlikely((*pkt) == NULL))
        return 0;
    /* NULL dest/bssid -> broadcast */
    ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_PROBE_REQ, NULL, NULL);

    data = (u8 *)(*pkt)->info_element;
    /* Add ESSID (can be NULL) */
    data = ieee80211softmac_add_essid(data, essid);
    /* Add Rates */
    data = ieee80211softmac_frame_add_rates(data, &mac->ratesinfo);
    /* Return packet size */
    return (data - (u8 *)(*pkt));
}
385
386/* Create a probe response packet */
387/* FIXME: Not complete */
388static u32
389ieee80211softmac_probe_resp(struct ieee80211_probe_response **pkt,
390 struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net)
391{
392 u8 *data;
393 /* Allocate Packet */
394 (*pkt) = (struct ieee80211_probe_response *)ieee80211softmac_alloc_mgt(
395 8 + /* Timestamp */
396 2 + /* Beacon Interval */
397 2 + /* Capability Info */
398 /* SSID IE */
399 1 + 1 + IW_ESSID_MAX_SIZE +
400 7 + /* FH Parameter Set */
401 2 + /* DS Parameter Set */
402 8 + /* CF Parameter Set */
403 4 /* IBSS Parameter Set */
404 );
405 if (unlikely((*pkt) == NULL))
406 return 0;
407 ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_PROBE_RESP, net->bssid, net->bssid);
408 data = (u8 *)(*pkt)->info_element;
409
410 /* Return the packet size */
411 return (data - (u8 *)(*pkt));
412}
413
414
415/* Sends a manangement packet
416 * FIXME: document the use of the arg parameter
417 * for _AUTH: (transaction #) | (status << 16)
418 */
419int
420ieee80211softmac_send_mgt_frame(struct ieee80211softmac_device *mac,
421 void *ptrarg, u32 type, u32 arg)
422{
423 void *pkt = NULL;
424 u32 pkt_size = 0;
425 int encrypt_mpdu = 0;
426
427 switch(type) {
428 case IEEE80211_STYPE_ASSOC_REQ:
429 pkt_size = ieee80211softmac_assoc_req((struct ieee80211_assoc_request **)(&pkt), mac, (struct ieee80211softmac_network *)ptrarg);
430 break;
431 case IEEE80211_STYPE_REASSOC_REQ:
432 pkt_size = ieee80211softmac_reassoc_req((struct ieee80211_reassoc_request **)(&pkt), mac, (struct ieee80211softmac_network *)ptrarg);
433 break;
434 case IEEE80211_STYPE_AUTH:
435 pkt_size = ieee80211softmac_auth((struct ieee80211_auth **)(&pkt), mac, (struct ieee80211softmac_network *)ptrarg, (u16)(arg & 0xFFFF), (u16) (arg >> 16), &encrypt_mpdu);
436 break;
437 case IEEE80211_STYPE_DISASSOC:
438 case IEEE80211_STYPE_DEAUTH:
439 pkt_size = ieee80211softmac_disassoc_deauth((struct ieee80211_disassoc **)(&pkt), mac, (struct ieee80211softmac_network *)ptrarg, type, (u16)(arg & 0xFFFF));
440 break;
441 case IEEE80211_STYPE_PROBE_REQ:
442 pkt_size = ieee80211softmac_probe_req((struct ieee80211_probe_request **)(&pkt), mac, (struct ieee80211softmac_essid *)ptrarg);
443 break;
444 case IEEE80211_STYPE_PROBE_RESP:
445 pkt_size = ieee80211softmac_probe_resp((struct ieee80211_probe_response **)(&pkt), mac, (struct ieee80211softmac_network *)ptrarg);
446 break;
447 default:
448 printkl(KERN_DEBUG PFX "Unsupported Management Frame type: %i\n", type);
449 return -EINVAL;
450 };
451
452 if(pkt_size == 0 || pkt == NULL) {
453 printkl(KERN_DEBUG PFX "Error, packet is nonexistant or 0 length\n");
454 return -ENOMEM;
455 }
456
457 /* Send the packet to the ieee80211 layer for tx */
458 /* we defined softmac->mgmt_xmit for this. Should we keep it
459 * as it is (that means we'd need to wrap this into a txb),
460 * modify the prototype (so it matches this function),
461 * or get rid of it alltogether?
462 * Does this work for you now?
463 */
464 ieee80211_tx_frame(mac->ieee, (struct ieee80211_hdr *)pkt,
465 IEEE80211_3ADDR_LEN, pkt_size, encrypt_mpdu);
466
467 kfree(pkt);
468 return 0;
469}
470
471/* Beacon handling */
472int ieee80211softmac_handle_beacon(struct net_device *dev,
473 struct ieee80211_beacon *beacon,
474 struct ieee80211_network *network)
475{
476 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
477
478 /* This might race, but we don't really care and it's not worth
479 * adding heavyweight locking in this fastpath.
480 */
481 if (mac->associnfo.associated) {
482 if (memcmp(network->bssid, mac->associnfo.bssid, ETH_ALEN) == 0)
483 ieee80211softmac_process_erp(mac, network->erp_value);
484 }
485
486 return 0;
487}
488
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
deleted file mode 100644
index 07505ca859af..000000000000
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ /dev/null
@@ -1,568 +0,0 @@
1/*
2 * Contains some basic softmac functions along with module registration code etc.
3 *
4 * Copyright (c) 2005, 2006 Johannes Berg <johannes@sipsolutions.net>
5 * Joseph Jezak <josejx@gentoo.org>
6 * Larry Finger <Larry.Finger@lwfinger.net>
7 * Danny van Dyk <kugelfang@gentoo.org>
8 * Michael Buesch <mbuesch@freenet.de>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 * The full GNU General Public License is included in this distribution in the
24 * file called COPYING.
25 */
26
27#include "ieee80211softmac_priv.h"
28#include <linux/sort.h>
29#include <linux/etherdevice.h>
30
/* Allocate an ieee80211 net_device with an embedded softmac layer.
 * sizeof_priv is the size of the driver's own private area, which is
 * placed behind the softmac private area. Returns NULL on failure. */
struct net_device *alloc_ieee80211softmac(int sizeof_priv)
{
	struct ieee80211softmac_device *softmac;
	struct net_device *dev;

	dev = alloc_ieee80211(sizeof(*softmac) + sizeof_priv);
	if (!dev)
		return NULL;
	softmac = ieee80211_priv(dev);
	/* The workqueue must exist before any handler can schedule work;
	 * freezeable so queued scans don't race suspend/resume. */
	softmac->wq = create_freezeable_workqueue("softmac");
	if (!softmac->wq) {
		free_ieee80211(dev);
		return NULL;
	}

	softmac->dev = dev;
	softmac->ieee = netdev_priv(dev);
	spin_lock_init(&softmac->lock);

	/* Hook the softmac response handlers into the ieee80211 layer. */
	softmac->ieee->handle_auth = ieee80211softmac_auth_resp;
	softmac->ieee->handle_deauth = ieee80211softmac_deauth_resp;
	softmac->ieee->handle_assoc_response = ieee80211softmac_handle_assoc_response;
	softmac->ieee->handle_reassoc_request = ieee80211softmac_handle_reassoc_req;
	softmac->ieee->handle_disassoc = ieee80211softmac_handle_disassoc;
	softmac->ieee->handle_beacon = ieee80211softmac_handle_beacon;
	softmac->scaninfo = NULL;

	softmac->associnfo.scan_retry = IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT;

	/* TODO: initialise all the other callbacks in the ieee struct
	 * (once they're written)
	 */

	INIT_LIST_HEAD(&softmac->auth_queue);
	INIT_LIST_HEAD(&softmac->network_list);
	INIT_LIST_HEAD(&softmac->events);

	mutex_init(&softmac->associnfo.mutex);
	INIT_DELAYED_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work);
	INIT_DELAYED_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout);
	/* Default scan callbacks; drivers may override all three. */
	softmac->start_scan = ieee80211softmac_start_scan_implementation;
	softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation;
	softmac->stop_scan = ieee80211softmac_stop_scan_implementation;

	/* to start with, we can't send anything ... */
	netif_carrier_off(dev);

	return dev;
}
EXPORT_SYMBOL_GPL(alloc_ieee80211softmac);
81
82/* Clears the pending work queue items, stops all scans, etc. */
83void
84ieee80211softmac_clear_pending_work(struct ieee80211softmac_device *sm)
85{
86 unsigned long flags;
87 struct ieee80211softmac_event *eventptr, *eventtmp;
88 struct ieee80211softmac_auth_queue_item *authptr, *authtmp;
89 struct ieee80211softmac_network *netptr, *nettmp;
90
91 ieee80211softmac_stop_scan(sm);
92 ieee80211softmac_wait_for_scan(sm);
93
94 spin_lock_irqsave(&sm->lock, flags);
95 sm->running = 0;
96
97 /* Free all pending assoc work items */
98 cancel_delayed_work(&sm->associnfo.work);
99
100 /* Free all pending scan work items */
101 if(sm->scaninfo != NULL)
102 cancel_delayed_work(&sm->scaninfo->softmac_scan);
103
104 /* Free all pending auth work items */
105 list_for_each_entry(authptr, &sm->auth_queue, list)
106 cancel_delayed_work(&authptr->work);
107
108 /* delete all pending event calls and work items */
109 list_for_each_entry_safe(eventptr, eventtmp, &sm->events, list)
110 cancel_delayed_work(&eventptr->work);
111
112 spin_unlock_irqrestore(&sm->lock, flags);
113 flush_workqueue(sm->wq);
114
115 /* now we should be save and no longer need locking... */
116 spin_lock_irqsave(&sm->lock, flags);
117 /* Free all pending auth work items */
118 list_for_each_entry_safe(authptr, authtmp, &sm->auth_queue, list) {
119 list_del(&authptr->list);
120 kfree(authptr);
121 }
122
123 /* delete all pending event calls and work items */
124 list_for_each_entry_safe(eventptr, eventtmp, &sm->events, list) {
125 list_del(&eventptr->list);
126 kfree(eventptr);
127 }
128
129 /* Free all networks */
130 list_for_each_entry_safe(netptr, nettmp, &sm->network_list, list) {
131 ieee80211softmac_del_network_locked(sm, netptr);
132 if(netptr->challenge != NULL)
133 kfree(netptr->challenge);
134 kfree(netptr);
135 }
136
137 spin_unlock_irqrestore(&sm->lock, flags);
138}
139EXPORT_SYMBOL_GPL(ieee80211softmac_clear_pending_work);
140
/* Tear down a device allocated with alloc_ieee80211softmac().
 * Pending work must be drained before the workqueue is destroyed. */
void free_ieee80211softmac(struct net_device *dev)
{
	struct ieee80211softmac_device *sm = ieee80211_priv(dev);
	ieee80211softmac_clear_pending_work(sm);
	/* kfree(NULL) is fine for either of these. */
	kfree(sm->scaninfo);
	kfree(sm->wpa.IE);
	destroy_workqueue(sm->wq);
	free_ieee80211(dev);
}
EXPORT_SYMBOL_GPL(free_ieee80211softmac);
151
152static void ieee80211softmac_start_check_rates(struct ieee80211softmac_device *mac)
153{
154 struct ieee80211softmac_ratesinfo *ri = &mac->ratesinfo;
155 /* I took out the sorting check, we're seperating by modulation now. */
156 if (ri->count)
157 return;
158 /* otherwise assume we hav'em all! */
159 if (mac->ieee->modulation & IEEE80211_CCK_MODULATION) {
160 ri->rates[ri->count++] = IEEE80211_CCK_RATE_1MB;
161 ri->rates[ri->count++] = IEEE80211_CCK_RATE_2MB;
162 ri->rates[ri->count++] = IEEE80211_CCK_RATE_5MB;
163 ri->rates[ri->count++] = IEEE80211_CCK_RATE_11MB;
164 }
165 if (mac->ieee->modulation & IEEE80211_OFDM_MODULATION) {
166 ri->rates[ri->count++] = IEEE80211_OFDM_RATE_6MB;
167 ri->rates[ri->count++] = IEEE80211_OFDM_RATE_9MB;
168 ri->rates[ri->count++] = IEEE80211_OFDM_RATE_12MB;
169 ri->rates[ri->count++] = IEEE80211_OFDM_RATE_18MB;
170 ri->rates[ri->count++] = IEEE80211_OFDM_RATE_24MB;
171 ri->rates[ri->count++] = IEEE80211_OFDM_RATE_36MB;
172 ri->rates[ri->count++] = IEEE80211_OFDM_RATE_48MB;
173 ri->rates[ri->count++] = IEEE80211_OFDM_RATE_54MB;
174 }
175}
176
177int ieee80211softmac_ratesinfo_rate_supported(struct ieee80211softmac_ratesinfo *ri, u8 rate)
178{
179 int search;
180 u8 search_rate;
181
182 for (search = 0; search < ri->count; search++) {
183 search_rate = ri->rates[search];
184 search_rate &= ~IEEE80211_BASIC_RATE_MASK;
185 if (rate == search_rate)
186 return 1;
187 }
188
189 return 0;
190}
191
/* Pick the fastest rate in 'ri' (typically a BSS's rate set) that is
 * no faster than the user-configured cap and that our own hardware
 * supports. basic_only restricts the search to basic rates. */
u8 ieee80211softmac_highest_supported_rate(struct ieee80211softmac_device *mac,
	struct ieee80211softmac_ratesinfo *ri, int basic_only)
{
	u8 user_rate = mac->txrates.user_rate;
	int i;

	/* Empty rate set: fall back to the slowest mandatory rate. */
	if (ri->count == 0)
		return IEEE80211_CCK_RATE_1MB;

	/* Walk from fastest to slowest (rates are kept sorted). */
	for (i = ri->count - 1; i >= 0; i--) {
		u8 rate = ri->rates[i];
		if (basic_only && !(rate & IEEE80211_BASIC_RATE_MASK))
			continue;
		rate &= ~IEEE80211_BASIC_RATE_MASK;
		if (rate > user_rate)
			continue;
		/* NOTE: support is checked against mac->ratesinfo (what WE
		 * can do), deliberately not against 'ri'. */
		if (ieee80211softmac_ratesinfo_rate_supported(&mac->ratesinfo, rate))
			return rate;
	}

	/* If we haven't found a suitable rate by now, just trust the user */
	return user_rate;
}
EXPORT_SYMBOL_GPL(ieee80211softmac_highest_supported_rate);
216
217void ieee80211softmac_process_erp(struct ieee80211softmac_device *mac,
218 u8 erp_value)
219{
220 int use_protection;
221 int short_preamble;
222 u32 changes = 0;
223
224 /* Barker preamble mode */
225 short_preamble = ((erp_value & WLAN_ERP_BARKER_PREAMBLE) == 0
226 && mac->associnfo.short_preamble_available) ? 1 : 0;
227
228 /* Protection needed? */
229 use_protection = (erp_value & WLAN_ERP_USE_PROTECTION) != 0;
230
231 if (mac->bssinfo.short_preamble != short_preamble) {
232 changes |= IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE;
233 mac->bssinfo.short_preamble = short_preamble;
234 }
235
236 if (mac->bssinfo.use_protection != use_protection) {
237 changes |= IEEE80211SOFTMAC_BSSINFOCHG_PROTECTION;
238 mac->bssinfo.use_protection = use_protection;
239 }
240
241 if (mac->bssinfo_change && changes)
242 mac->bssinfo_change(mac->dev, changes);
243}
244
245void ieee80211softmac_recalc_txrates(struct ieee80211softmac_device *mac)
246{
247 struct ieee80211softmac_txrates *txrates = &mac->txrates;
248 u32 change = 0;
249
250 change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
251 txrates->default_rate = ieee80211softmac_highest_supported_rate(mac, &mac->bssinfo.supported_rates, 0);
252
253 change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK;
254 txrates->default_fallback = lower_rate(mac, txrates->default_rate);
255
256 change |= IEEE80211SOFTMAC_TXRATECHG_MCAST;
257 txrates->mcast_rate = ieee80211softmac_highest_supported_rate(mac, &mac->bssinfo.supported_rates, 1);
258
259 if (mac->txrates_change)
260 mac->txrates_change(mac->dev, change);
261
262}
263
/* Reset all BSS-dependent state (rates, preamble, protection) to safe
 * defaults and notify the driver. Marks the softmac as running. */
void ieee80211softmac_init_bss(struct ieee80211softmac_device *mac)
{
	struct ieee80211_device *ieee = mac->ieee;
	u32 change = 0;
	struct ieee80211softmac_txrates *txrates = &mac->txrates;
	struct ieee80211softmac_bss_info *bssinfo = &mac->bssinfo;

	/* TODO: We need some kind of state machine to lower the default rates
	 * if we loose too many packets.
	 */
	/* Change the default txrate to the highest possible value.
	 * The txrate machine will lower it, if it is too high.
	 */
	if (ieee->modulation & IEEE80211_OFDM_MODULATION)
		txrates->user_rate = IEEE80211_OFDM_RATE_24MB;
	else
		txrates->user_rate = IEEE80211_CCK_RATE_11MB;

	/* Start conservatively at 1 Mbit for all actual TX rates. */
	txrates->default_rate = IEEE80211_CCK_RATE_1MB;
	change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;

	txrates->default_fallback = IEEE80211_CCK_RATE_1MB;
	change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK;

	txrates->mcast_rate = IEEE80211_CCK_RATE_1MB;
	change |= IEEE80211SOFTMAC_TXRATECHG_MCAST;

	txrates->mgt_mcast_rate = IEEE80211_CCK_RATE_1MB;
	change |= IEEE80211SOFTMAC_TXRATECHG_MGT_MCAST;

	if (mac->txrates_change)
		mac->txrates_change(mac->dev, change);

	change = 0;

	/* Clear the BSS rate set and feature flags. */
	bssinfo->supported_rates.count = 0;
	memset(bssinfo->supported_rates.rates, 0,
		sizeof(bssinfo->supported_rates.rates));
	change |= IEEE80211SOFTMAC_BSSINFOCHG_RATES;

	bssinfo->short_preamble = 0;
	change |= IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE;

	bssinfo->use_protection = 0;
	change |= IEEE80211SOFTMAC_BSSINFOCHG_PROTECTION;

	if (mac->bssinfo_change)
		mac->bssinfo_change(mac->dev, change);

	mac->running = 1;
}
315
/* Driver entry point: bring up the softmac (called e.g. from ndo_open).
 * Fills in default rates if the driver set none, then resets BSS state. */
void ieee80211softmac_start(struct net_device *dev)
{
	struct ieee80211softmac_device *mac = ieee80211_priv(dev);

	ieee80211softmac_start_check_rates(mac);
	ieee80211softmac_init_bss(mac);
}
EXPORT_SYMBOL_GPL(ieee80211softmac_start);
324
/* Driver entry point: stop the softmac, cancelling scans and all
 * outstanding work (may sleep). */
void ieee80211softmac_stop(struct net_device *dev)
{
	struct ieee80211softmac_device *mac = ieee80211_priv(dev);

	ieee80211softmac_clear_pending_work(mac);
}
EXPORT_SYMBOL_GPL(ieee80211softmac_stop);
332
333void ieee80211softmac_set_rates(struct net_device *dev, u8 count, u8 *rates)
334{
335 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
336 unsigned long flags;
337
338 spin_lock_irqsave(&mac->lock, flags);
339 memcpy(mac->ratesinfo.rates, rates, count);
340 mac->ratesinfo.count = count;
341 spin_unlock_irqrestore(&mac->lock, flags);
342}
343EXPORT_SYMBOL_GPL(ieee80211softmac_set_rates);
344
345static u8 raise_rate(struct ieee80211softmac_device *mac, u8 rate)
346{
347 int i;
348 struct ieee80211softmac_ratesinfo *ri = &mac->ratesinfo;
349
350 for (i=0; i<ri->count-1; i++) {
351 if (ri->rates[i] == rate)
352 return ri->rates[i+1];
353 }
354 /* I guess we can't go any higher... */
355 return ri->rates[ri->count];
356}
357
/* Return the rate 'delta' steps slower than 'rate', or the slowest rate
 * in the table if we cannot go that far down. */
u8 ieee80211softmac_lower_rate_delta(struct ieee80211softmac_device *mac, u8 rate, int delta)
{
	int i;
	struct ieee80211softmac_ratesinfo *ri = &mac->ratesinfo;

	/* Starting at i = delta guarantees i - delta never goes negative. */
	for (i=delta; i<ri->count; i++) {
		if (ri->rates[i] == rate)
			return ri->rates[i-delta];
	}
	/* I guess we can't go any lower... */
	return ri->rates[0];
}
370
371static void ieee80211softmac_add_txrates_badness(struct ieee80211softmac_device *mac,
372 int amount)
373{
374 u8 default_rate = mac->txrates.default_rate;
375 u8 default_fallback = mac->txrates.default_fallback;
376 u32 changes = 0;
377
378 //TODO: This is highly experimental code.
379 // Maybe the dynamic rate selection does not work
380 // and it has to be removed again.
381
382printk("badness %d\n", mac->txrate_badness);
383 mac->txrate_badness += amount;
384 if (mac->txrate_badness <= -1000) {
385 /* Very small badness. Try a faster bitrate. */
386 default_rate = raise_rate(mac, default_rate);
387 changes |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
388 default_fallback = get_fallback_rate(mac, default_rate);
389 changes |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK;
390 mac->txrate_badness = 0;
391printk("Bitrate raised to %u\n", default_rate);
392 } else if (mac->txrate_badness >= 10000) {
393 /* Very high badness. Try a slower bitrate. */
394 default_rate = lower_rate(mac, default_rate);
395 changes |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
396 default_fallback = get_fallback_rate(mac, default_rate);
397 changes |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK;
398 mac->txrate_badness = 0;
399printk("Bitrate lowered to %u\n", default_rate);
400 }
401
402 mac->txrates.default_rate = default_rate;
403 mac->txrates.default_fallback = default_fallback;
404
405 if (changes && mac->txrates_change)
406 mac->txrates_change(mac->dev, changes);
407}
408
/* Driver notification: a TX fragment was lost. Feeds heavy badness into
 * the dynamic rate selection. wl_seq is currently unused (see TODO). */
void ieee80211softmac_fragment_lost(struct net_device *dev,
				    u16 wl_seq)
{
	struct ieee80211softmac_device *mac = ieee80211_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&mac->lock, flags);
	ieee80211softmac_add_txrates_badness(mac, 1000);
	//TODO

	spin_unlock_irqrestore(&mac->lock, flags);
}
EXPORT_SYMBOL_GPL(ieee80211softmac_fragment_lost);
422
423static int rate_cmp(const void *a_, const void *b_) {
424 u8 *a, *b;
425 a = (u8*)a_;
426 b = (u8*)b_;
427 return ((*a & ~IEEE80211_BASIC_RATE_MASK) - (*b & ~IEEE80211_BASIC_RATE_MASK));
428}
429
430/* Allocate a softmac network struct and fill it from a network */
431struct ieee80211softmac_network *
432ieee80211softmac_create_network(struct ieee80211softmac_device *mac,
433 struct ieee80211_network *net)
434{
435 struct ieee80211softmac_network *softnet;
436 softnet = kzalloc(sizeof(struct ieee80211softmac_network), GFP_ATOMIC);
437 if(softnet == NULL)
438 return NULL;
439 memcpy(softnet->bssid, net->bssid, ETH_ALEN);
440 softnet->channel = net->channel;
441 softnet->essid.len = net->ssid_len;
442 memcpy(softnet->essid.data, net->ssid, softnet->essid.len);
443
444 /* copy rates over */
445 softnet->supported_rates.count = net->rates_len;
446 memcpy(&softnet->supported_rates.rates[0], net->rates, net->rates_len);
447 memcpy(&softnet->supported_rates.rates[softnet->supported_rates.count], net->rates_ex, net->rates_ex_len);
448 softnet->supported_rates.count += net->rates_ex_len;
449 sort(softnet->supported_rates.rates, softnet->supported_rates.count, sizeof(softnet->supported_rates.rates[0]), rate_cmp, NULL);
450
451 /* we save the ERP value because it is needed at association time, and
452 * many AP's do not include an ERP IE in the association response. */
453 softnet->erp_value = net->erp_value;
454
455 softnet->capabilities = net->capability;
456 return softnet;
457}
458
459
460/* Add a network to the list, while locked */
461void
462ieee80211softmac_add_network_locked(struct ieee80211softmac_device *mac,
463 struct ieee80211softmac_network *add_net)
464{
465 struct ieee80211softmac_network *softmac_net;
466
467 list_for_each_entry(softmac_net, &mac->network_list, list) {
468 if(!memcmp(softmac_net->bssid, add_net->bssid, ETH_ALEN))
469 return;
470 }
471 list_add(&(add_net->list), &mac->network_list);
472}
473
474/* Add a network to the list, with locking */
475void
476ieee80211softmac_add_network(struct ieee80211softmac_device *mac,
477 struct ieee80211softmac_network *add_net)
478{
479 unsigned long flags;
480 spin_lock_irqsave(&mac->lock, flags);
481 ieee80211softmac_add_network_locked(mac, add_net);
482 spin_unlock_irqrestore(&mac->lock, flags);
483}
484
485
486/* Delete a network from the list, while locked*/
487void
488ieee80211softmac_del_network_locked(struct ieee80211softmac_device *mac,
489 struct ieee80211softmac_network *del_net)
490{
491 list_del(&(del_net->list));
492}
493
494/* Delete a network from the list with locking */
495void
496ieee80211softmac_del_network(struct ieee80211softmac_device *mac,
497 struct ieee80211softmac_network *del_net)
498{
499 unsigned long flags;
500 spin_lock_irqsave(&mac->lock, flags);
501 ieee80211softmac_del_network_locked(mac, del_net);
502 spin_unlock_irqrestore(&mac->lock, flags);
503}
504
505/* Get a network from the list by MAC while locked */
506struct ieee80211softmac_network *
507ieee80211softmac_get_network_by_bssid_locked(struct ieee80211softmac_device *mac,
508 u8 *bssid)
509{
510 struct ieee80211softmac_network *softmac_net;
511
512 list_for_each_entry(softmac_net, &mac->network_list, list) {
513 if(!memcmp(softmac_net->bssid, bssid, ETH_ALEN))
514 return softmac_net;
515 }
516 return NULL;
517}
518
519/* Get a network from the list by BSSID with locking */
520struct ieee80211softmac_network *
521ieee80211softmac_get_network_by_bssid(struct ieee80211softmac_device *mac,
522 u8 *bssid)
523{
524 unsigned long flags;
525 struct ieee80211softmac_network *softmac_net;
526
527 spin_lock_irqsave(&mac->lock, flags);
528 softmac_net = ieee80211softmac_get_network_by_bssid_locked(mac, bssid);
529 spin_unlock_irqrestore(&mac->lock, flags);
530 return softmac_net;
531}
532
533/* Get a network from the list by ESSID while locked */
534struct ieee80211softmac_network *
535ieee80211softmac_get_network_by_essid_locked(struct ieee80211softmac_device *mac,
536 struct ieee80211softmac_essid *essid)
537{
538 struct ieee80211softmac_network *softmac_net;
539
540 list_for_each_entry(softmac_net, &mac->network_list, list) {
541 if (softmac_net->essid.len == essid->len &&
542 !memcmp(softmac_net->essid.data, essid->data, essid->len))
543 return softmac_net;
544 }
545 return NULL;
546}
547
548/* Get a network from the list by ESSID with locking */
549struct ieee80211softmac_network *
550ieee80211softmac_get_network_by_essid(struct ieee80211softmac_device *mac,
551 struct ieee80211softmac_essid *essid)
552{
553 unsigned long flags;
554 struct ieee80211softmac_network *softmac_net = NULL;
555
556 spin_lock_irqsave(&mac->lock, flags);
557 softmac_net = ieee80211softmac_get_network_by_essid_locked(mac, essid);
558 spin_unlock_irqrestore(&mac->lock, flags);
559 return softmac_net;
560}
561
562MODULE_LICENSE("GPL");
563MODULE_AUTHOR("Johannes Berg");
564MODULE_AUTHOR("Joseph Jezak");
565MODULE_AUTHOR("Larry Finger");
566MODULE_AUTHOR("Danny van Dyk");
567MODULE_AUTHOR("Michael Buesch");
568MODULE_DESCRIPTION("802.11 software MAC");
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h
deleted file mode 100644
index c43b189634df..000000000000
--- a/net/ieee80211/softmac/ieee80211softmac_priv.h
+++ /dev/null
@@ -1,244 +0,0 @@
1/*
2 * Internal softmac API definitions.
3 *
4 * Copyright (c) 2005, 2006 Johannes Berg <johannes@sipsolutions.net>
5 * Joseph Jezak <josejx@gentoo.org>
6 * Larry Finger <Larry.Finger@lwfinger.net>
7 * Danny van Dyk <kugelfang@gentoo.org>
8 * Michael Buesch <mbuesch@freenet.de>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 * The full GNU General Public License is included in this distribution in the
24 * file called COPYING.
25 */
26
27#ifndef IEEE80211SOFTMAC_PRIV_H_
28#define IEEE80211SOFTMAC_PRIV_H_
29
30#include <net/ieee80211softmac.h>
31#include <net/ieee80211softmac_wx.h>
32#include <linux/kernel.h>
33#include <linux/stringify.h>
34
35
36#define PFX "SoftMAC: "
37
38#ifdef assert
39# undef assert
40#endif
41#ifdef CONFIG_IEEE80211_SOFTMAC_DEBUG
42#define assert(expr) \
43 do { \
44 if (unlikely(!(expr))) { \
45 printkl(KERN_ERR PFX "ASSERTION FAILED (%s) at: %s:%d:%s()\n", #expr, \
46 __FILE__, __LINE__, __FUNCTION__); \
47 } \
48 } while (0)
49#else
50#define assert(expr) do {} while (0)
51#endif
52
53/* rate limited printk(). */
54#ifdef printkl
55# undef printkl
56#endif
57#define printkl(f, x...) do { if (printk_ratelimit()) printk(f ,##x); } while (0)
58/* rate limited printk() for debugging */
59#ifdef dprintkl
60# undef dprintkl
61#endif
62#ifdef CONFIG_IEEE80211_SOFTMAC_DEBUG
63# define dprintkl printkl
64#else
65# define dprintkl(f, x...) do { /* nothing */ } while (0)
66#endif
67
68/* debugging printk() */
69#ifdef dprintk
70# undef dprintk
71#endif
72#ifdef CONFIG_IEEE80211_SOFTMAC_DEBUG
73# define dprintk(f, x...) do { printk(f ,##x); } while (0)
74#else
75# define dprintk(f, x...) do { /* nothing */ } while (0)
76#endif
77
78/* private definitions and prototypes */
79
80/*** prototypes from _scan.c */
81void ieee80211softmac_scan(struct work_struct *work);
82/* for internal use if scanning is needed */
83int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac);
84void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac);
85void ieee80211softmac_wait_for_scan(struct ieee80211softmac_device *mac);
86
87/* for use by _module.c to assign to the callbacks */
88int ieee80211softmac_start_scan_implementation(struct net_device *dev);
89void ieee80211softmac_stop_scan_implementation(struct net_device *dev);
90void ieee80211softmac_wait_for_scan_implementation(struct net_device *dev);
91
92/*** Network prototypes from _module.c */
93struct ieee80211softmac_network * ieee80211softmac_create_network(
94 struct ieee80211softmac_device *mac, struct ieee80211_network *net);
95void ieee80211softmac_add_network_locked(struct ieee80211softmac_device *mac,
96 struct ieee80211softmac_network *net);
97void ieee80211softmac_add_network(struct ieee80211softmac_device *mac,
98 struct ieee80211softmac_network *net);
99void ieee80211softmac_del_network_locked(struct ieee80211softmac_device *mac,
100 struct ieee80211softmac_network *net);
101void ieee80211softmac_del_network(struct ieee80211softmac_device *mac,
102 struct ieee80211softmac_network *net);
103struct ieee80211softmac_network * ieee80211softmac_get_network_by_bssid_locked(
104 struct ieee80211softmac_device *mac, u8 *ea);
105struct ieee80211softmac_network * ieee80211softmac_get_network_by_bssid(
106 struct ieee80211softmac_device *mac, u8 *ea);
107struct ieee80211softmac_network * ieee80211softmac_get_network_by_ssid_locked(
108 struct ieee80211softmac_device *mac, u8 *ssid, u8 ssid_len);
109struct ieee80211softmac_network * ieee80211softmac_get_network_by_ssid(
110 struct ieee80211softmac_device *mac, u8 *ssid, u8 ssid_len);
111struct ieee80211softmac_network *
112ieee80211softmac_get_network_by_essid_locked(struct ieee80211softmac_device *mac,
113 struct ieee80211softmac_essid *essid);
114struct ieee80211softmac_network *
115ieee80211softmac_get_network_by_essid(struct ieee80211softmac_device *mac,
116 struct ieee80211softmac_essid *essid);
117
118/* Rates related */
119void ieee80211softmac_process_erp(struct ieee80211softmac_device *mac,
120 u8 erp_value);
121int ieee80211softmac_ratesinfo_rate_supported(struct ieee80211softmac_ratesinfo *ri, u8 rate);
122u8 ieee80211softmac_lower_rate_delta(struct ieee80211softmac_device *mac, u8 rate, int delta);
123void ieee80211softmac_init_bss(struct ieee80211softmac_device *mac);
124void ieee80211softmac_recalc_txrates(struct ieee80211softmac_device *mac);
/* One rate step slower than 'rate' in our rate table. */
static inline u8 lower_rate(struct ieee80211softmac_device *mac, u8 rate) {
	return ieee80211softmac_lower_rate_delta(mac, rate, 1);
}
128
/* Two rate steps slower than 'rate'; used as the TX fallback rate. */
static inline u8 get_fallback_rate(struct ieee80211softmac_device *mac, u8 rate)
{
	return ieee80211softmac_lower_rate_delta(mac, rate, 2);
}
133
134
135/*** prototypes from _io.c */
136int ieee80211softmac_send_mgt_frame(struct ieee80211softmac_device *mac,
137 void* ptrarg, u32 type, u32 arg);
138int ieee80211softmac_handle_beacon(struct net_device *dev,
139 struct ieee80211_beacon *beacon,
140 struct ieee80211_network *network);
141
142/*** prototypes from _auth.c */
143/* do these have to go into the public header? */
144int ieee80211softmac_auth_req(struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net);
145int ieee80211softmac_deauth_req(struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net, int reason);
146
147/* for use by _module.c to assign to the callbacks */
148int ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth);
149int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth);
150
151/*** prototypes from _assoc.c */
152void ieee80211softmac_assoc_work(struct work_struct *work);
153int ieee80211softmac_handle_assoc_response(struct net_device * dev,
154 struct ieee80211_assoc_response * resp,
155 struct ieee80211_network * network);
156int ieee80211softmac_handle_disassoc(struct net_device * dev,
157 struct ieee80211_disassoc * disassoc);
158int ieee80211softmac_handle_reassoc_req(struct net_device * dev,
159 struct ieee80211_reassoc_request * reassoc);
160void ieee80211softmac_assoc_timeout(struct work_struct *work);
161void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason);
162void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac);
163
164/* some helper functions */
/* True iff all three scan callbacks are still the softmac-internal
 * implementations, i.e. the driver overrode none of them. */
static inline int ieee80211softmac_scan_handlers_check_self(struct ieee80211softmac_device *sm)
{
	return (sm->start_scan == ieee80211softmac_start_scan_implementation) &&
		(sm->stop_scan == ieee80211softmac_stop_scan_implementation) &&
		(sm->wait_for_scan == ieee80211softmac_wait_for_scan_implementation);
}
171
/* A driver must override either all three scan callbacks or none of
 * them; returns true only for those two consistent configurations. */
static inline int ieee80211softmac_scan_sanity_check(struct ieee80211softmac_device *sm)
{
	return ((sm->start_scan != ieee80211softmac_start_scan_implementation) &&
		(sm->stop_scan != ieee80211softmac_stop_scan_implementation) &&
		(sm->wait_for_scan != ieee80211softmac_wait_for_scan_implementation)
		) || ieee80211softmac_scan_handlers_check_self(sm);
}
179
180#define IEEE80211SOFTMAC_PROBE_DELAY HZ/50
181#define IEEE80211SOFTMAC_WORKQUEUE_NAME_LEN (17 + IFNAMSIZ)
182
183struct ieee80211softmac_network {
184 struct list_head list; /* List */
185 /* Network information copied from ieee80211_network */
186 u8 bssid[ETH_ALEN];
187 u8 channel;
188 struct ieee80211softmac_essid essid;
189
190 struct ieee80211softmac_ratesinfo supported_rates;
191
192 /* SoftMAC specific */
193 u16 authenticating:1, /* Status Flags */
194 authenticated:1,
195 auth_desynced_once:1;
196
197 u8 erp_value; /* Saved ERP value */
198 u16 capabilities; /* Capabilities bitfield */
199 u8 challenge_len; /* Auth Challenge length */
200 char *challenge; /* Challenge Text */
201};
202
203/* structure used to keep track of networks we're auth'ing to */
204struct ieee80211softmac_auth_queue_item {
205 struct list_head list; /* List head */
206 struct ieee80211softmac_network *net; /* Network to auth */
207 struct ieee80211softmac_device *mac; /* SoftMAC device */
208 u8 retry; /* Retry limit */
209 u8 state; /* Auth State */
210 struct delayed_work work; /* Work queue */
211};
212
213/* scanning information */
214struct ieee80211softmac_scaninfo {
215 u8 current_channel_idx,
216 number_channels;
217 struct ieee80211_channel *channels;
218 u8 started:1,
219 stop:1;
220 u8 skip_flags;
221 struct completion finished;
222 struct delayed_work softmac_scan;
223 struct ieee80211softmac_device *mac;
224};
225
226/* private event struct */
227struct ieee80211softmac_event {
228 struct list_head list;
229 int event_type;
230 void *event_context;
231 struct delayed_work work;
232 notify_function_ptr fun;
233 void *context;
234 struct ieee80211softmac_device *mac;
235};
236
237void ieee80211softmac_call_events(struct ieee80211softmac_device *mac, int event, void *event_context);
238void ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int event, void *event_context);
239int ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac,
240 int event, void *event_context, notify_function_ptr fun, void *context, gfp_t gfp_mask);
241
242void ieee80211softmac_try_reassoc(struct ieee80211softmac_device *mac);
243
244#endif /* IEEE80211SOFTMAC_PRIV_H_ */
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c
deleted file mode 100644
index bfab8d7db88f..000000000000
--- a/net/ieee80211/softmac/ieee80211softmac_scan.c
+++ /dev/null
@@ -1,254 +0,0 @@
1/*
2 * Scanning routines.
3 *
4 * These are not exported because they're assigned to the function pointers.
5 *
6 * Copyright (c) 2005, 2006 Johannes Berg <johannes@sipsolutions.net>
7 * Joseph Jezak <josejx@gentoo.org>
8 * Larry Finger <Larry.Finger@lwfinger.net>
9 * Danny van Dyk <kugelfang@gentoo.org>
10 * Michael Buesch <mbuesch@freenet.de>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 * more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 * The full GNU General Public License is included in this distribution in the
26 * file called COPYING.
27 */
28
29#include <linux/completion.h>
30#include "ieee80211softmac_priv.h"
31
32/* internal, use to trigger scanning if needed.
33 * Returns -EBUSY if already scanning,
34 * result of start_scan otherwise */
35int
36ieee80211softmac_start_scan(struct ieee80211softmac_device *sm)
37{
38 unsigned long flags;
39 int ret;
40
41 spin_lock_irqsave(&sm->lock, flags);
42 if (sm->scanning)
43 {
44 spin_unlock_irqrestore(&sm->lock, flags);
45 return -EINPROGRESS;
46 }
47 sm->scanning = 1;
48 spin_unlock_irqrestore(&sm->lock, flags);
49
50 ret = sm->start_scan(sm->dev);
51 if (ret) {
52 spin_lock_irqsave(&sm->lock, flags);
53 sm->scanning = 0;
54 spin_unlock_irqrestore(&sm->lock, flags);
55 }
56 return ret;
57}
58
59void
60ieee80211softmac_stop_scan(struct ieee80211softmac_device *sm)
61{
62 unsigned long flags;
63
64 spin_lock_irqsave(&sm->lock, flags);
65
66 if (!sm->scanning) {
67 spin_unlock_irqrestore(&sm->lock, flags);
68 return;
69 }
70
71 spin_unlock_irqrestore(&sm->lock, flags);
72 sm->stop_scan(sm->dev);
73}
74
75void
76ieee80211softmac_wait_for_scan(struct ieee80211softmac_device *sm)
77{
78 unsigned long flags;
79
80 spin_lock_irqsave(&sm->lock, flags);
81
82 if (!sm->scanning) {
83 spin_unlock_irqrestore(&sm->lock, flags);
84 return;
85 }
86
87 spin_unlock_irqrestore(&sm->lock, flags);
88 sm->wait_for_scan(sm->dev);
89}
90
91
92/* internal scanning implementation follows */
93void ieee80211softmac_scan(struct work_struct *work)
94{
95 int invalid_channel;
96 u8 current_channel_idx;
97 struct ieee80211softmac_scaninfo *si =
98 container_of(work, struct ieee80211softmac_scaninfo,
99 softmac_scan.work);
100 struct ieee80211softmac_device *sm = si->mac;
101 unsigned long flags;
102
103 while (!(si->stop) && (si->current_channel_idx < si->number_channels)) {
104 current_channel_idx = si->current_channel_idx;
105 si->current_channel_idx++; /* go to the next channel */
106
107 invalid_channel = (si->skip_flags & si->channels[current_channel_idx].flags);
108
109 if (!invalid_channel) {
110 sm->set_channel(sm->dev, si->channels[current_channel_idx].channel);
111 // FIXME make this user configurable (active/passive)
112 if(ieee80211softmac_send_mgt_frame(sm, NULL, IEEE80211_STYPE_PROBE_REQ, 0))
113 printkl(KERN_DEBUG PFX "Sending Probe Request Failed\n");
114
115 /* also send directed management frame for the network we're looking for */
116 // TODO: is this if correct, or should we do this only if scanning from assoc request?
117 if (sm->associnfo.req_essid.len)
118 ieee80211softmac_send_mgt_frame(sm, &sm->associnfo.req_essid, IEEE80211_STYPE_PROBE_REQ, 0);
119
120 spin_lock_irqsave(&sm->lock, flags);
121 if (unlikely(!sm->running)) {
122 /* Prevent reschedule on workqueue flush */
123 spin_unlock_irqrestore(&sm->lock, flags);
124 break;
125 }
126 queue_delayed_work(sm->wq, &si->softmac_scan, IEEE80211SOFTMAC_PROBE_DELAY);
127 spin_unlock_irqrestore(&sm->lock, flags);
128 return;
129 } else {
130 dprintk(PFX "Not probing Channel %d (not allowed here)\n", si->channels[current_channel_idx].channel);
131 }
132 }
133
134 spin_lock_irqsave(&sm->lock, flags);
135 cancel_delayed_work(&si->softmac_scan);
136 si->started = 0;
137 spin_unlock_irqrestore(&sm->lock, flags);
138
139 dprintk(PFX "Scanning finished: scanned %d channels starting with channel %d\n",
140 sm->scaninfo->number_channels, sm->scaninfo->channels[0].channel);
141 ieee80211softmac_scan_finished(sm);
142 complete_all(&sm->scaninfo->finished);
143}
144
145static inline struct ieee80211softmac_scaninfo *allocate_scaninfo(struct ieee80211softmac_device *mac)
146{
147 /* ugh. can we call this without having the spinlock held? */
148 struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC);
149 if (unlikely(!info))
150 return NULL;
151 INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan);
152 info->mac = mac;
153 init_completion(&info->finished);
154 return info;
155}
156
157int ieee80211softmac_start_scan_implementation(struct net_device *dev)
158{
159 struct ieee80211softmac_device *sm = ieee80211_priv(dev);
160 unsigned long flags;
161
162 if (!(dev->flags & IFF_UP))
163 return -ENODEV;
164
165 assert(ieee80211softmac_scan_handlers_check_self(sm));
166 if (!ieee80211softmac_scan_handlers_check_self(sm))
167 return -EINVAL;
168
169 spin_lock_irqsave(&sm->lock, flags);
170 /* it looks like we need to hold the lock here
171 * to make sure we don't allocate two of these... */
172 if (unlikely(!sm->scaninfo))
173 sm->scaninfo = allocate_scaninfo(sm);
174 if (unlikely(!sm->scaninfo)) {
175 spin_unlock_irqrestore(&sm->lock, flags);
176 return -ENOMEM;
177 }
178
179 sm->scaninfo->skip_flags = IEEE80211_CH_INVALID;
180 if (0 /* not scanning in IEEE802.11b */)//TODO
181 sm->scaninfo->skip_flags |= IEEE80211_CH_B_ONLY;
182 if (0 /* IEEE802.11a */) {//TODO
183 sm->scaninfo->channels = sm->ieee->geo.a;
184 sm->scaninfo->number_channels = sm->ieee->geo.a_channels;
185 } else {
186 sm->scaninfo->channels = sm->ieee->geo.bg;
187 sm->scaninfo->number_channels = sm->ieee->geo.bg_channels;
188 }
189 sm->scaninfo->current_channel_idx = 0;
190 sm->scaninfo->started = 1;
191 sm->scaninfo->stop = 0;
192 INIT_COMPLETION(sm->scaninfo->finished);
193 queue_delayed_work(sm->wq, &sm->scaninfo->softmac_scan, 0);
194 spin_unlock_irqrestore(&sm->lock, flags);
195 return 0;
196}
197
198void ieee80211softmac_stop_scan_implementation(struct net_device *dev)
199{
200 struct ieee80211softmac_device *sm = ieee80211_priv(dev);
201 unsigned long flags;
202
203 assert(ieee80211softmac_scan_handlers_check_self(sm));
204 if (!ieee80211softmac_scan_handlers_check_self(sm))
205 return;
206
207 spin_lock_irqsave(&sm->lock, flags);
208 assert(sm->scaninfo != NULL);
209 if (sm->scaninfo) {
210 if (sm->scaninfo->started)
211 sm->scaninfo->stop = 1;
212 else
213 complete_all(&sm->scaninfo->finished);
214 }
215 spin_unlock_irqrestore(&sm->lock, flags);
216}
217
218void ieee80211softmac_wait_for_scan_implementation(struct net_device *dev)
219{
220 struct ieee80211softmac_device *sm = ieee80211_priv(dev);
221 unsigned long flags;
222
223 assert(ieee80211softmac_scan_handlers_check_self(sm));
224 if (!ieee80211softmac_scan_handlers_check_self(sm))
225 return;
226
227 spin_lock_irqsave(&sm->lock, flags);
228 if (!sm->scaninfo->started) {
229 spin_unlock_irqrestore(&sm->lock, flags);
230 return;
231 }
232 spin_unlock_irqrestore(&sm->lock, flags);
233 wait_for_completion(&sm->scaninfo->finished);
234}
235
236/* this is what drivers (that do scanning) call when they're done */
237void ieee80211softmac_scan_finished(struct ieee80211softmac_device *sm)
238{
239 unsigned long flags;
240
241 spin_lock_irqsave(&sm->lock, flags);
242 sm->scanning = 0;
243 spin_unlock_irqrestore(&sm->lock, flags);
244
245 if (sm->associnfo.bssvalid) {
246 struct ieee80211softmac_network *net;
247
248 net = ieee80211softmac_get_network_by_bssid(sm, sm->associnfo.bssid);
249 if (net)
250 sm->set_channel(sm->dev, net->channel);
251 }
252 ieee80211softmac_call_events(sm, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, NULL);
253}
254EXPORT_SYMBOL_GPL(ieee80211softmac_scan_finished);
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
deleted file mode 100644
index e01b59aedc54..000000000000
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ /dev/null
@@ -1,508 +0,0 @@
1/*
2 * This file contains our _wx handlers. Make sure you EXPORT_SYMBOL_GPL them
3 *
4 * Copyright (c) 2005, 2006 Johannes Berg <johannes@sipsolutions.net>
5 * Joseph Jezak <josejx@gentoo.org>
6 * Larry Finger <Larry.Finger@lwfinger.net>
7 * Danny van Dyk <kugelfang@gentoo.org>
8 * Michael Buesch <mbuesch@freenet.de>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 * The full GNU General Public License is included in this distribution in the
24 * file called COPYING.
25 */
26
27#include "ieee80211softmac_priv.h"
28
29#include <net/iw_handler.h>
30/* for is_broadcast_ether_addr and is_zero_ether_addr */
31#include <linux/etherdevice.h>
32
33int
34ieee80211softmac_wx_trigger_scan(struct net_device *net_dev,
35 struct iw_request_info *info,
36 union iwreq_data *data,
37 char *extra)
38{
39 struct ieee80211softmac_device *sm = ieee80211_priv(net_dev);
40 return ieee80211softmac_start_scan(sm);
41}
42EXPORT_SYMBOL_GPL(ieee80211softmac_wx_trigger_scan);
43
44
45/* if we're still scanning, return -EAGAIN so that userspace tools
46 * can get the complete scan results, otherwise return 0. */
47int
48ieee80211softmac_wx_get_scan_results(struct net_device *net_dev,
49 struct iw_request_info *info,
50 union iwreq_data *data,
51 char *extra)
52{
53 unsigned long flags;
54 struct ieee80211softmac_device *sm = ieee80211_priv(net_dev);
55
56 spin_lock_irqsave(&sm->lock, flags);
57 if (sm->scanning) {
58 spin_unlock_irqrestore(&sm->lock, flags);
59 return -EAGAIN;
60 }
61 spin_unlock_irqrestore(&sm->lock, flags);
62 return ieee80211_wx_get_scan(sm->ieee, info, data, extra);
63}
64EXPORT_SYMBOL_GPL(ieee80211softmac_wx_get_scan_results);
65
66int
67ieee80211softmac_wx_set_essid(struct net_device *net_dev,
68 struct iw_request_info *info,
69 union iwreq_data *data,
70 char *extra)
71{
72 struct ieee80211softmac_device *sm = ieee80211_priv(net_dev);
73 struct ieee80211softmac_auth_queue_item *authptr;
74 int length = 0;
75 DECLARE_MAC_BUF(mac);
76
77check_assoc_again:
78 mutex_lock(&sm->associnfo.mutex);
79 if((sm->associnfo.associating || sm->associnfo.associated) &&
80 (data->essid.flags && data->essid.length)) {
81 dprintk(KERN_INFO PFX "Canceling existing associate request!\n");
82 /* Cancel assoc work */
83 cancel_delayed_work(&sm->associnfo.work);
84 /* We don't have to do this, but it's a little cleaner */
85 list_for_each_entry(authptr, &sm->auth_queue, list)
86 cancel_delayed_work(&authptr->work);
87 sm->associnfo.bssvalid = 0;
88 sm->associnfo.bssfixed = 0;
89 sm->associnfo.associating = 0;
90 sm->associnfo.associated = 0;
91 /* We must unlock to avoid deadlocks with the assoc workqueue
92 * on the associnfo.mutex */
93 mutex_unlock(&sm->associnfo.mutex);
94 flush_workqueue(sm->wq);
95 /* Avoid race! Check assoc status again. Maybe someone started an
96 * association while we flushed. */
97 goto check_assoc_again;
98 }
99
100 sm->associnfo.static_essid = 0;
101 sm->associnfo.assoc_wait = 0;
102
103 if (data->essid.flags && data->essid.length) {
104 length = min((int)data->essid.length, IW_ESSID_MAX_SIZE);
105 if (length) {
106 memcpy(sm->associnfo.req_essid.data, extra, length);
107 sm->associnfo.static_essid = 1;
108 }
109 }
110
111 /* set our requested ESSID length.
112 * If applicable, we have already copied the data in */
113 sm->associnfo.req_essid.len = length;
114
115 sm->associnfo.associating = 1;
116 /* queue lower level code to do work (if necessary) */
117 queue_delayed_work(sm->wq, &sm->associnfo.work, 0);
118
119 mutex_unlock(&sm->associnfo.mutex);
120
121 return 0;
122}
123EXPORT_SYMBOL_GPL(ieee80211softmac_wx_set_essid);
124
125int
126ieee80211softmac_wx_get_essid(struct net_device *net_dev,
127 struct iw_request_info *info,
128 union iwreq_data *data,
129 char *extra)
130{
131 struct ieee80211softmac_device *sm = ieee80211_priv(net_dev);
132
133 mutex_lock(&sm->associnfo.mutex);
134 /* If all fails, return ANY (empty) */
135 data->essid.length = 0;
136 data->essid.flags = 0; /* active */
137
138 /* If we have a statically configured ESSID then return it */
139 if (sm->associnfo.static_essid) {
140 data->essid.length = sm->associnfo.req_essid.len;
141 data->essid.flags = 1; /* active */
142 memcpy(extra, sm->associnfo.req_essid.data, sm->associnfo.req_essid.len);
143 dprintk(KERN_INFO PFX "Getting essid from req_essid\n");
144 } else if (sm->associnfo.associated || sm->associnfo.associating) {
145 /* If we're associating/associated, return that */
146 data->essid.length = sm->associnfo.associate_essid.len;
147 data->essid.flags = 1; /* active */
148 memcpy(extra, sm->associnfo.associate_essid.data, sm->associnfo.associate_essid.len);
149 dprintk(KERN_INFO PFX "Getting essid from associate_essid\n");
150 }
151 mutex_unlock(&sm->associnfo.mutex);
152
153 return 0;
154}
155EXPORT_SYMBOL_GPL(ieee80211softmac_wx_get_essid);
156
157int
158ieee80211softmac_wx_set_rate(struct net_device *net_dev,
159 struct iw_request_info *info,
160 union iwreq_data *data,
161 char *extra)
162{
163 struct ieee80211softmac_device *mac = ieee80211_priv(net_dev);
164 struct ieee80211_device *ieee = mac->ieee;
165 unsigned long flags;
166 s32 in_rate = data->bitrate.value;
167 u8 rate;
168 int is_ofdm = 0;
169 int err = -EINVAL;
170
171 if (in_rate == -1) {
172 if (ieee->modulation & IEEE80211_OFDM_MODULATION)
173 in_rate = 24000000;
174 else
175 in_rate = 11000000;
176 }
177
178 switch (in_rate) {
179 case 1000000:
180 rate = IEEE80211_CCK_RATE_1MB;
181 break;
182 case 2000000:
183 rate = IEEE80211_CCK_RATE_2MB;
184 break;
185 case 5500000:
186 rate = IEEE80211_CCK_RATE_5MB;
187 break;
188 case 11000000:
189 rate = IEEE80211_CCK_RATE_11MB;
190 break;
191 case 6000000:
192 rate = IEEE80211_OFDM_RATE_6MB;
193 is_ofdm = 1;
194 break;
195 case 9000000:
196 rate = IEEE80211_OFDM_RATE_9MB;
197 is_ofdm = 1;
198 break;
199 case 12000000:
200 rate = IEEE80211_OFDM_RATE_12MB;
201 is_ofdm = 1;
202 break;
203 case 18000000:
204 rate = IEEE80211_OFDM_RATE_18MB;
205 is_ofdm = 1;
206 break;
207 case 24000000:
208 rate = IEEE80211_OFDM_RATE_24MB;
209 is_ofdm = 1;
210 break;
211 case 36000000:
212 rate = IEEE80211_OFDM_RATE_36MB;
213 is_ofdm = 1;
214 break;
215 case 48000000:
216 rate = IEEE80211_OFDM_RATE_48MB;
217 is_ofdm = 1;
218 break;
219 case 54000000:
220 rate = IEEE80211_OFDM_RATE_54MB;
221 is_ofdm = 1;
222 break;
223 default:
224 goto out;
225 }
226
227 spin_lock_irqsave(&mac->lock, flags);
228
229 /* Check if correct modulation for this PHY. */
230 if (is_ofdm && !(ieee->modulation & IEEE80211_OFDM_MODULATION))
231 goto out_unlock;
232
233 mac->txrates.user_rate = rate;
234 ieee80211softmac_recalc_txrates(mac);
235 err = 0;
236
237out_unlock:
238 spin_unlock_irqrestore(&mac->lock, flags);
239out:
240 return err;
241}
242EXPORT_SYMBOL_GPL(ieee80211softmac_wx_set_rate);
243
244int
245ieee80211softmac_wx_get_rate(struct net_device *net_dev,
246 struct iw_request_info *info,
247 union iwreq_data *data,
248 char *extra)
249{
250 struct ieee80211softmac_device *mac = ieee80211_priv(net_dev);
251 unsigned long flags;
252 int err = -EINVAL;
253
254 spin_lock_irqsave(&mac->lock, flags);
255
256 if (unlikely(!mac->running)) {
257 err = -ENODEV;
258 goto out_unlock;
259 }
260
261 switch (mac->txrates.default_rate) {
262 case IEEE80211_CCK_RATE_1MB:
263 data->bitrate.value = 1000000;
264 break;
265 case IEEE80211_CCK_RATE_2MB:
266 data->bitrate.value = 2000000;
267 break;
268 case IEEE80211_CCK_RATE_5MB:
269 data->bitrate.value = 5500000;
270 break;
271 case IEEE80211_CCK_RATE_11MB:
272 data->bitrate.value = 11000000;
273 break;
274 case IEEE80211_OFDM_RATE_6MB:
275 data->bitrate.value = 6000000;
276 break;
277 case IEEE80211_OFDM_RATE_9MB:
278 data->bitrate.value = 9000000;
279 break;
280 case IEEE80211_OFDM_RATE_12MB:
281 data->bitrate.value = 12000000;
282 break;
283 case IEEE80211_OFDM_RATE_18MB:
284 data->bitrate.value = 18000000;
285 break;
286 case IEEE80211_OFDM_RATE_24MB:
287 data->bitrate.value = 24000000;
288 break;
289 case IEEE80211_OFDM_RATE_36MB:
290 data->bitrate.value = 36000000;
291 break;
292 case IEEE80211_OFDM_RATE_48MB:
293 data->bitrate.value = 48000000;
294 break;
295 case IEEE80211_OFDM_RATE_54MB:
296 data->bitrate.value = 54000000;
297 break;
298 default:
299 assert(0);
300 goto out_unlock;
301 }
302 err = 0;
303out_unlock:
304 spin_unlock_irqrestore(&mac->lock, flags);
305
306 return err;
307}
308EXPORT_SYMBOL_GPL(ieee80211softmac_wx_get_rate);
309
310int
311ieee80211softmac_wx_get_wap(struct net_device *net_dev,
312 struct iw_request_info *info,
313 union iwreq_data *data,
314 char *extra)
315{
316 struct ieee80211softmac_device *mac = ieee80211_priv(net_dev);
317 int err = 0;
318
319 mutex_lock(&mac->associnfo.mutex);
320 if (mac->associnfo.bssvalid)
321 memcpy(data->ap_addr.sa_data, mac->associnfo.bssid, ETH_ALEN);
322 else
323 memset(data->ap_addr.sa_data, 0xff, ETH_ALEN);
324 data->ap_addr.sa_family = ARPHRD_ETHER;
325 mutex_unlock(&mac->associnfo.mutex);
326
327 return err;
328}
329EXPORT_SYMBOL_GPL(ieee80211softmac_wx_get_wap);
330
331int
332ieee80211softmac_wx_set_wap(struct net_device *net_dev,
333 struct iw_request_info *info,
334 union iwreq_data *data,
335 char *extra)
336{
337 struct ieee80211softmac_device *mac = ieee80211_priv(net_dev);
338
339 /* sanity check */
340 if (data->ap_addr.sa_family != ARPHRD_ETHER) {
341 return -EINVAL;
342 }
343
344 mutex_lock(&mac->associnfo.mutex);
345 if (is_broadcast_ether_addr(data->ap_addr.sa_data)) {
346 /* the bssid we have is not to be fixed any longer,
347 * and we should reassociate to the best AP. */
348 mac->associnfo.bssfixed = 0;
349 /* force reassociation */
350 mac->associnfo.bssvalid = 0;
351 if (mac->associnfo.associated)
352 queue_delayed_work(mac->wq, &mac->associnfo.work, 0);
353 } else if (is_zero_ether_addr(data->ap_addr.sa_data)) {
354 /* the bssid we have is no longer fixed */
355 mac->associnfo.bssfixed = 0;
356 } else {
357 if (!memcmp(mac->associnfo.bssid, data->ap_addr.sa_data, ETH_ALEN)) {
358 if (mac->associnfo.associating || mac->associnfo.associated) {
359 /* bssid unchanged and associated or associating - just return */
360 goto out;
361 }
362 } else {
363 /* copy new value in data->ap_addr.sa_data to bssid */
364 memcpy(mac->associnfo.bssid, data->ap_addr.sa_data, ETH_ALEN);
365 }
366 /* tell the other code that this bssid should be used no matter what */
367 mac->associnfo.bssfixed = 1;
368 /* queue associate if new bssid or (old one again and not associated) */
369 queue_delayed_work(mac->wq, &mac->associnfo.work, 0);
370 }
371
372 out:
373 mutex_unlock(&mac->associnfo.mutex);
374
375 return 0;
376}
377EXPORT_SYMBOL_GPL(ieee80211softmac_wx_set_wap);
378
379int
380ieee80211softmac_wx_set_genie(struct net_device *dev,
381 struct iw_request_info *info,
382 union iwreq_data *wrqu,
383 char *extra)
384{
385 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
386 unsigned long flags;
387 int err = 0;
388 char *buf;
389 int i;
390
391 mutex_lock(&mac->associnfo.mutex);
392 spin_lock_irqsave(&mac->lock, flags);
393 /* bleh. shouldn't be locked for that kmalloc... */
394
395 if (wrqu->data.length) {
396 if ((wrqu->data.length < 2) || (extra[1]+2 != wrqu->data.length)) {
397 /* this is an IE, so the length must be
398 * correct. Is it possible though that
399 * more than one IE is passed in?
400 */
401 err = -EINVAL;
402 goto out;
403 }
404 if (mac->wpa.IEbuflen <= wrqu->data.length) {
405 buf = kmalloc(wrqu->data.length, GFP_ATOMIC);
406 if (!buf) {
407 err = -ENOMEM;
408 goto out;
409 }
410 kfree(mac->wpa.IE);
411 mac->wpa.IE = buf;
412 mac->wpa.IEbuflen = wrqu->data.length;
413 }
414 memcpy(mac->wpa.IE, extra, wrqu->data.length);
415 dprintk(KERN_INFO PFX "generic IE set to ");
416 for (i=0;i<wrqu->data.length;i++)
417 dprintk("%.2x", (u8)mac->wpa.IE[i]);
418 dprintk("\n");
419 mac->wpa.IElen = wrqu->data.length;
420 } else {
421 kfree(mac->wpa.IE);
422 mac->wpa.IE = NULL;
423 mac->wpa.IElen = 0;
424 mac->wpa.IEbuflen = 0;
425 }
426
427 out:
428 spin_unlock_irqrestore(&mac->lock, flags);
429 mutex_unlock(&mac->associnfo.mutex);
430
431 return err;
432}
433EXPORT_SYMBOL_GPL(ieee80211softmac_wx_set_genie);
434
435int
436ieee80211softmac_wx_get_genie(struct net_device *dev,
437 struct iw_request_info *info,
438 union iwreq_data *wrqu,
439 char *extra)
440{
441 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
442 unsigned long flags;
443 int err = 0;
444 int space = wrqu->data.length;
445
446 mutex_lock(&mac->associnfo.mutex);
447 spin_lock_irqsave(&mac->lock, flags);
448
449 wrqu->data.length = 0;
450
451 if (mac->wpa.IE && mac->wpa.IElen) {
452 wrqu->data.length = mac->wpa.IElen;
453 if (mac->wpa.IElen <= space)
454 memcpy(extra, mac->wpa.IE, mac->wpa.IElen);
455 else
456 err = -E2BIG;
457 }
458 spin_unlock_irqrestore(&mac->lock, flags);
459 mutex_unlock(&mac->associnfo.mutex);
460
461 return err;
462}
463EXPORT_SYMBOL_GPL(ieee80211softmac_wx_get_genie);
464
465int
466ieee80211softmac_wx_set_mlme(struct net_device *dev,
467 struct iw_request_info *info,
468 union iwreq_data *wrqu,
469 char *extra)
470{
471 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
472 struct iw_mlme *mlme = (struct iw_mlme *)extra;
473 u16 reason = mlme->reason_code;
474 struct ieee80211softmac_network *net;
475 int err = -EINVAL;
476
477 mutex_lock(&mac->associnfo.mutex);
478
479 if (memcmp(mac->associnfo.bssid, mlme->addr.sa_data, ETH_ALEN)) {
480 printk(KERN_DEBUG PFX "wx_set_mlme: requested operation on net we don't use\n");
481 goto out;
482 }
483
484 switch (mlme->cmd) {
485 case IW_MLME_DEAUTH:
486 net = ieee80211softmac_get_network_by_bssid_locked(mac, mlme->addr.sa_data);
487 if (!net) {
488 printk(KERN_DEBUG PFX "wx_set_mlme: we should know the net here...\n");
489 goto out;
490 }
491 err = ieee80211softmac_deauth_req(mac, net, reason);
492 goto out;
493 case IW_MLME_DISASSOC:
494 ieee80211softmac_send_disassoc_req(mac, reason);
495 mac->associnfo.associated = 0;
496 mac->associnfo.associating = 0;
497 err = 0;
498 goto out;
499 default:
500 err = -EOPNOTSUPP;
501 }
502
503out:
504 mutex_unlock(&mac->associnfo.mutex);
505
506 return err;
507}
508EXPORT_SYMBOL_GPL(ieee80211softmac_wx_set_mlme);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 0d109504ed86..f2b5270efdaa 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -243,6 +243,23 @@ void build_ehash_secret(void)
243} 243}
244EXPORT_SYMBOL(build_ehash_secret); 244EXPORT_SYMBOL(build_ehash_secret);
245 245
246static inline int inet_netns_ok(struct net *net, int protocol)
247{
248 int hash;
249 struct net_protocol *ipprot;
250
251 if (net == &init_net)
252 return 1;
253
254 hash = protocol & (MAX_INET_PROTOS - 1);
255 ipprot = rcu_dereference(inet_protos[hash]);
256
257 if (ipprot == NULL)
258 /* raw IP is OK */
259 return 1;
260 return ipprot->netns_ok;
261}
262
246/* 263/*
247 * Create an inet socket. 264 * Create an inet socket.
248 */ 265 */
@@ -259,9 +276,6 @@ static int inet_create(struct net *net, struct socket *sock, int protocol)
259 int try_loading_module = 0; 276 int try_loading_module = 0;
260 int err; 277 int err;
261 278
262 if (net != &init_net)
263 return -EAFNOSUPPORT;
264
265 if (sock->type != SOCK_RAW && 279 if (sock->type != SOCK_RAW &&
266 sock->type != SOCK_DGRAM && 280 sock->type != SOCK_DGRAM &&
267 !inet_ehash_secret) 281 !inet_ehash_secret)
@@ -320,6 +334,10 @@ lookup_protocol:
320 if (answer->capability > 0 && !capable(answer->capability)) 334 if (answer->capability > 0 && !capable(answer->capability))
321 goto out_rcu_unlock; 335 goto out_rcu_unlock;
322 336
337 err = -EAFNOSUPPORT;
338 if (!inet_netns_ok(net, protocol))
339 goto out_rcu_unlock;
340
323 sock->ops = answer->ops; 341 sock->ops = answer->ops;
324 answer_prot = answer->prot; 342 answer_prot = answer->prot;
325 answer_no_check = answer->no_check; 343 answer_no_check = answer->no_check;
@@ -446,7 +464,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
446 if (addr_len < sizeof(struct sockaddr_in)) 464 if (addr_len < sizeof(struct sockaddr_in))
447 goto out; 465 goto out;
448 466
449 chk_addr_ret = inet_addr_type(&init_net, addr->sin_addr.s_addr); 467 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
450 468
451 /* Not specified by any standard per-se, however it breaks too 469 /* Not specified by any standard per-se, however it breaks too
452 * many applications when removed. It is unfortunate since 470 * many applications when removed. It is unfortunate since
@@ -784,6 +802,7 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
784{ 802{
785 struct sock *sk = sock->sk; 803 struct sock *sk = sock->sk;
786 int err = 0; 804 int err = 0;
805 struct net *net = sock_net(sk);
787 806
788 switch (cmd) { 807 switch (cmd) {
789 case SIOCGSTAMP: 808 case SIOCGSTAMP:
@@ -795,12 +814,12 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
795 case SIOCADDRT: 814 case SIOCADDRT:
796 case SIOCDELRT: 815 case SIOCDELRT:
797 case SIOCRTMSG: 816 case SIOCRTMSG:
798 err = ip_rt_ioctl(sk->sk_net, cmd, (void __user *)arg); 817 err = ip_rt_ioctl(net, cmd, (void __user *)arg);
799 break; 818 break;
800 case SIOCDARP: 819 case SIOCDARP:
801 case SIOCGARP: 820 case SIOCGARP:
802 case SIOCSARP: 821 case SIOCSARP:
803 err = arp_ioctl(sk->sk_net, cmd, (void __user *)arg); 822 err = arp_ioctl(net, cmd, (void __user *)arg);
804 break; 823 break;
805 case SIOCGIFADDR: 824 case SIOCGIFADDR:
806 case SIOCSIFADDR: 825 case SIOCSIFADDR:
@@ -813,7 +832,7 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
813 case SIOCSIFPFLAGS: 832 case SIOCSIFPFLAGS:
814 case SIOCGIFPFLAGS: 833 case SIOCGIFPFLAGS:
815 case SIOCSIFFLAGS: 834 case SIOCSIFFLAGS:
816 err = devinet_ioctl(cmd, (void __user *)arg); 835 err = devinet_ioctl(net, cmd, (void __user *)arg);
817 break; 836 break;
818 default: 837 default:
819 if (sk->sk_prot->ioctl) 838 if (sk->sk_prot->ioctl)
@@ -1058,8 +1077,8 @@ static int inet_sk_reselect_saddr(struct sock *sk)
1058 1077
1059 if (sysctl_ip_dynaddr > 1) { 1078 if (sysctl_ip_dynaddr > 1) {
1060 printk(KERN_INFO "%s(): shifting inet->" 1079 printk(KERN_INFO "%s(): shifting inet->"
1061 "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n", 1080 "saddr from " NIPQUAD_FMT " to " NIPQUAD_FMT "\n",
1062 __FUNCTION__, 1081 __func__,
1063 NIPQUAD(old_saddr), 1082 NIPQUAD(old_saddr),
1064 NIPQUAD(new_saddr)); 1083 NIPQUAD(new_saddr));
1065 } 1084 }
@@ -1113,7 +1132,7 @@ int inet_sk_rebuild_header(struct sock *sk)
1113 }; 1132 };
1114 1133
1115 security_sk_classify_flow(sk, &fl); 1134 security_sk_classify_flow(sk, &fl);
1116 err = ip_route_output_flow(&init_net, &rt, &fl, sk, 0); 1135 err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0);
1117} 1136}
1118 if (!err) 1137 if (!err)
1119 sk_setup_caps(sk, &rt->u.dst); 1138 sk_setup_caps(sk, &rt->u.dst);
@@ -1231,6 +1250,29 @@ out:
1231 return segs; 1250 return segs;
1232} 1251}
1233 1252
1253int inet_ctl_sock_create(struct sock **sk, unsigned short family,
1254 unsigned short type, unsigned char protocol,
1255 struct net *net)
1256{
1257 struct socket *sock;
1258 int rc = sock_create_kern(family, type, protocol, &sock);
1259
1260 if (rc == 0) {
1261 *sk = sock->sk;
1262 (*sk)->sk_allocation = GFP_ATOMIC;
1263 /*
1264 * Unhash it so that IP input processing does not even see it,
1265 * we do not wish this socket to see incoming packets.
1266 */
1267 (*sk)->sk_prot->unhash(*sk);
1268
1269 sk_change_net(*sk, net);
1270 }
1271 return rc;
1272}
1273
1274EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
1275
1234unsigned long snmp_fold_field(void *mib[], int offt) 1276unsigned long snmp_fold_field(void *mib[], int offt)
1235{ 1277{
1236 unsigned long res = 0; 1278 unsigned long res = 0;
@@ -1283,17 +1325,20 @@ static struct net_protocol tcp_protocol = {
1283 .gso_send_check = tcp_v4_gso_send_check, 1325 .gso_send_check = tcp_v4_gso_send_check,
1284 .gso_segment = tcp_tso_segment, 1326 .gso_segment = tcp_tso_segment,
1285 .no_policy = 1, 1327 .no_policy = 1,
1328 .netns_ok = 1,
1286}; 1329};
1287 1330
1288static struct net_protocol udp_protocol = { 1331static struct net_protocol udp_protocol = {
1289 .handler = udp_rcv, 1332 .handler = udp_rcv,
1290 .err_handler = udp_err, 1333 .err_handler = udp_err,
1291 .no_policy = 1, 1334 .no_policy = 1,
1335 .netns_ok = 1,
1292}; 1336};
1293 1337
1294static struct net_protocol icmp_protocol = { 1338static struct net_protocol icmp_protocol = {
1295 .handler = icmp_rcv, 1339 .handler = icmp_rcv,
1296 .no_policy = 1, 1340 .no_policy = 1,
1341 .netns_ok = 1,
1297}; 1342};
1298 1343
1299static int __init init_ipv4_mibs(void) 1344static int __init init_ipv4_mibs(void)
@@ -1414,7 +1459,7 @@ static int __init inet_init(void)
1414 1459
1415 ip_init(); 1460 ip_init();
1416 1461
1417 tcp_v4_init(&inet_family_ops); 1462 tcp_v4_init();
1418 1463
1419 /* Setup TCP slab cache for open requests. */ 1464 /* Setup TCP slab cache for open requests. */
1420 tcp_init(); 1465 tcp_init();
@@ -1429,7 +1474,8 @@ static int __init inet_init(void)
1429 * Set the ICMP layer up 1474 * Set the ICMP layer up
1430 */ 1475 */
1431 1476
1432 icmp_init(&inet_family_ops); 1477 if (icmp_init() < 0)
1478 panic("Failed to create the ICMP control socket.\n");
1433 1479
1434 /* 1480 /*
1435 * Initialise the multicast router 1481 * Initialise the multicast router
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 8e17f65f4002..68b72a7a1806 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -242,7 +242,7 @@ static int arp_constructor(struct neighbour *neigh)
242 return -EINVAL; 242 return -EINVAL;
243 } 243 }
244 244
245 neigh->type = inet_addr_type(&init_net, addr); 245 neigh->type = inet_addr_type(dev_net(dev), addr);
246 246
247 parms = in_dev->arp_parms; 247 parms = in_dev->arp_parms;
248 __neigh_parms_put(neigh->parms); 248 __neigh_parms_put(neigh->parms);
@@ -341,14 +341,14 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
341 switch (IN_DEV_ARP_ANNOUNCE(in_dev)) { 341 switch (IN_DEV_ARP_ANNOUNCE(in_dev)) {
342 default: 342 default:
343 case 0: /* By default announce any local IP */ 343 case 0: /* By default announce any local IP */
344 if (skb && inet_addr_type(&init_net, ip_hdr(skb)->saddr) == RTN_LOCAL) 344 if (skb && inet_addr_type(dev_net(dev), ip_hdr(skb)->saddr) == RTN_LOCAL)
345 saddr = ip_hdr(skb)->saddr; 345 saddr = ip_hdr(skb)->saddr;
346 break; 346 break;
347 case 1: /* Restrict announcements of saddr in same subnet */ 347 case 1: /* Restrict announcements of saddr in same subnet */
348 if (!skb) 348 if (!skb)
349 break; 349 break;
350 saddr = ip_hdr(skb)->saddr; 350 saddr = ip_hdr(skb)->saddr;
351 if (inet_addr_type(&init_net, saddr) == RTN_LOCAL) { 351 if (inet_addr_type(dev_net(dev), saddr) == RTN_LOCAL) {
352 /* saddr should be known to target */ 352 /* saddr should be known to target */
353 if (inet_addr_onlink(in_dev, target, saddr)) 353 if (inet_addr_onlink(in_dev, target, saddr))
354 break; 354 break;
@@ -424,7 +424,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
424 int flag = 0; 424 int flag = 0;
425 /*unsigned long now; */ 425 /*unsigned long now; */
426 426
427 if (ip_route_output_key(&init_net, &rt, &fl) < 0) 427 if (ip_route_output_key(dev_net(dev), &rt, &fl) < 0)
428 return 1; 428 return 1;
429 if (rt->u.dst.dev != dev) { 429 if (rt->u.dst.dev != dev) {
430 NET_INC_STATS_BH(LINUX_MIB_ARPFILTER); 430 NET_INC_STATS_BH(LINUX_MIB_ARPFILTER);
@@ -475,9 +475,9 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb)
475 return 1; 475 return 1;
476 } 476 }
477 477
478 paddr = ((struct rtable*)skb->dst)->rt_gateway; 478 paddr = skb->rtable->rt_gateway;
479 479
480 if (arp_set_predefined(inet_addr_type(&init_net, paddr), haddr, paddr, dev)) 480 if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr, paddr, dev))
481 return 0; 481 return 0;
482 482
483 n = __neigh_lookup(&arp_tbl, &paddr, dev, 1); 483 n = __neigh_lookup(&arp_tbl, &paddr, dev, 1);
@@ -570,14 +570,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
570 * Allocate a buffer 570 * Allocate a buffer
571 */ 571 */
572 572
573 skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4) 573 skb = alloc_skb(arp_hdr_len(dev) + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
574 + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
575 if (skb == NULL) 574 if (skb == NULL)
576 return NULL; 575 return NULL;
577 576
578 skb_reserve(skb, LL_RESERVED_SPACE(dev)); 577 skb_reserve(skb, LL_RESERVED_SPACE(dev));
579 skb_reset_network_header(skb); 578 skb_reset_network_header(skb);
580 arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4)); 579 arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev));
581 skb->dev = dev; 580 skb->dev = dev;
582 skb->protocol = htons(ETH_P_ARP); 581 skb->protocol = htons(ETH_P_ARP);
583 if (src_hw == NULL) 582 if (src_hw == NULL)
@@ -710,6 +709,7 @@ static int arp_process(struct sk_buff *skb)
710 u16 dev_type = dev->type; 709 u16 dev_type = dev->type;
711 int addr_type; 710 int addr_type;
712 struct neighbour *n; 711 struct neighbour *n;
712 struct net *net = dev_net(dev);
713 713
714 /* arp_rcv below verifies the ARP header and verifies the device 714 /* arp_rcv below verifies the ARP header and verifies the device
715 * is ARP'able. 715 * is ARP'able.
@@ -805,7 +805,7 @@ static int arp_process(struct sk_buff *skb)
805 /* Special case: IPv4 duplicate address detection packet (RFC2131) */ 805 /* Special case: IPv4 duplicate address detection packet (RFC2131) */
806 if (sip == 0) { 806 if (sip == 0) {
807 if (arp->ar_op == htons(ARPOP_REQUEST) && 807 if (arp->ar_op == htons(ARPOP_REQUEST) &&
808 inet_addr_type(&init_net, tip) == RTN_LOCAL && 808 inet_addr_type(net, tip) == RTN_LOCAL &&
809 !arp_ignore(in_dev, sip, tip)) 809 !arp_ignore(in_dev, sip, tip))
810 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha, 810 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
811 dev->dev_addr, sha); 811 dev->dev_addr, sha);
@@ -815,7 +815,7 @@ static int arp_process(struct sk_buff *skb)
815 if (arp->ar_op == htons(ARPOP_REQUEST) && 815 if (arp->ar_op == htons(ARPOP_REQUEST) &&
816 ip_route_input(skb, tip, sip, 0, dev) == 0) { 816 ip_route_input(skb, tip, sip, 0, dev) == 0) {
817 817
818 rt = (struct rtable*)skb->dst; 818 rt = skb->rtable;
819 addr_type = rt->rt_type; 819 addr_type = rt->rt_type;
820 820
821 if (addr_type == RTN_LOCAL) { 821 if (addr_type == RTN_LOCAL) {
@@ -835,7 +835,7 @@ static int arp_process(struct sk_buff *skb)
835 goto out; 835 goto out;
836 } else if (IN_DEV_FORWARD(in_dev)) { 836 } else if (IN_DEV_FORWARD(in_dev)) {
837 if (addr_type == RTN_UNICAST && rt->u.dst.dev != dev && 837 if (addr_type == RTN_UNICAST && rt->u.dst.dev != dev &&
838 (arp_fwd_proxy(in_dev, rt) || pneigh_lookup(&arp_tbl, &init_net, &tip, dev, 0))) { 838 (arp_fwd_proxy(in_dev, rt) || pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
839 n = neigh_event_ns(&arp_tbl, sha, &sip, dev); 839 n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
840 if (n) 840 if (n)
841 neigh_release(n); 841 neigh_release(n);
@@ -858,14 +858,14 @@ static int arp_process(struct sk_buff *skb)
858 858
859 n = __neigh_lookup(&arp_tbl, &sip, dev, 0); 859 n = __neigh_lookup(&arp_tbl, &sip, dev, 0);
860 860
861 if (IPV4_DEVCONF_ALL(dev->nd_net, ARP_ACCEPT)) { 861 if (IPV4_DEVCONF_ALL(dev_net(dev), ARP_ACCEPT)) {
862 /* Unsolicited ARP is not accepted by default. 862 /* Unsolicited ARP is not accepted by default.
863 It is possible, that this option should be enabled for some 863 It is possible, that this option should be enabled for some
864 devices (strip is candidate) 864 devices (strip is candidate)
865 */ 865 */
866 if (n == NULL && 866 if (n == NULL &&
867 arp->ar_op == htons(ARPOP_REPLY) && 867 arp->ar_op == htons(ARPOP_REPLY) &&
868 inet_addr_type(&init_net, sip) == RTN_UNICAST) 868 inet_addr_type(net, sip) == RTN_UNICAST)
869 n = __neigh_lookup(&arp_tbl, &sip, dev, 1); 869 n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
870 } 870 }
871 871
@@ -912,13 +912,8 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
912{ 912{
913 struct arphdr *arp; 913 struct arphdr *arp;
914 914
915 if (dev->nd_net != &init_net)
916 goto freeskb;
917
918 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ 915 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
919 if (!pskb_may_pull(skb, (sizeof(struct arphdr) + 916 if (!pskb_may_pull(skb, arp_hdr_len(dev)))
920 (2 * dev->addr_len) +
921 (2 * sizeof(u32)))))
922 goto freeskb; 917 goto freeskb;
923 918
924 arp = arp_hdr(skb); 919 arp = arp_hdr(skb);
@@ -1201,9 +1196,6 @@ static int arp_netdev_event(struct notifier_block *this, unsigned long event, vo
1201{ 1196{
1202 struct net_device *dev = ptr; 1197 struct net_device *dev = ptr;
1203 1198
1204 if (dev->nd_net != &init_net)
1205 return NOTIFY_DONE;
1206
1207 switch (event) { 1199 switch (event) {
1208 case NETDEV_CHANGEADDR: 1200 case NETDEV_CHANGEADDR:
1209 neigh_changeaddr(&arp_tbl, dev); 1201 neigh_changeaddr(&arp_tbl, dev);
@@ -1318,7 +1310,7 @@ static void arp_format_neigh_entry(struct seq_file *seq,
1318#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 1310#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
1319 } 1311 }
1320#endif 1312#endif
1321 sprintf(tbuf, "%u.%u.%u.%u", NIPQUAD(*(u32*)n->primary_key)); 1313 sprintf(tbuf, NIPQUAD_FMT, NIPQUAD(*(u32*)n->primary_key));
1322 seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n", 1314 seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n",
1323 tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name); 1315 tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name);
1324 read_unlock(&n->lock); 1316 read_unlock(&n->lock);
@@ -1331,7 +1323,7 @@ static void arp_format_pneigh_entry(struct seq_file *seq,
1331 int hatype = dev ? dev->type : 0; 1323 int hatype = dev ? dev->type : 0;
1332 char tbuf[16]; 1324 char tbuf[16];
1333 1325
1334 sprintf(tbuf, "%u.%u.%u.%u", NIPQUAD(*(u32*)n->key)); 1326 sprintf(tbuf, NIPQUAD_FMT, NIPQUAD(*(u32*)n->key));
1335 seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n", 1327 seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n",
1336 tbuf, hatype, ATF_PUBL | ATF_PERM, "00:00:00:00:00:00", 1328 tbuf, hatype, ATF_PUBL | ATF_PERM, "00:00:00:00:00:00",
1337 dev ? dev->name : "*"); 1329 dev ? dev->name : "*");
@@ -1385,13 +1377,29 @@ static const struct file_operations arp_seq_fops = {
1385 .release = seq_release_net, 1377 .release = seq_release_net,
1386}; 1378};
1387 1379
1388static int __init arp_proc_init(void) 1380
1381static int __net_init arp_net_init(struct net *net)
1389{ 1382{
1390 if (!proc_net_fops_create(&init_net, "arp", S_IRUGO, &arp_seq_fops)) 1383 if (!proc_net_fops_create(net, "arp", S_IRUGO, &arp_seq_fops))
1391 return -ENOMEM; 1384 return -ENOMEM;
1392 return 0; 1385 return 0;
1393} 1386}
1394 1387
1388static void __net_exit arp_net_exit(struct net *net)
1389{
1390 proc_net_remove(net, "arp");
1391}
1392
1393static struct pernet_operations arp_net_ops = {
1394 .init = arp_net_init,
1395 .exit = arp_net_exit,
1396};
1397
1398static int __init arp_proc_init(void)
1399{
1400 return register_pernet_subsys(&arp_net_ops);
1401}
1402
1395#else /* CONFIG_PROC_FS */ 1403#else /* CONFIG_PROC_FS */
1396 1404
1397static int __init arp_proc_init(void) 1405static int __init arp_proc_init(void)
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 8cd357f41283..4637ded3dba8 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1800,7 +1800,6 @@ int cipso_v4_sock_setattr(struct sock *sk,
1800 } 1800 }
1801 memcpy(opt->__data, buf, buf_len); 1801 memcpy(opt->__data, buf, buf_len);
1802 opt->optlen = opt_len; 1802 opt->optlen = opt_len;
1803 opt->is_data = 1;
1804 opt->cipso = sizeof(struct iphdr); 1803 opt->cipso = sizeof(struct iphdr);
1805 kfree(buf); 1804 kfree(buf);
1806 buf = NULL; 1805 buf = NULL;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 87490f7bb0f7..6848e4760f34 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -165,7 +165,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
165 if (!in_dev) 165 if (!in_dev)
166 goto out; 166 goto out;
167 INIT_RCU_HEAD(&in_dev->rcu_head); 167 INIT_RCU_HEAD(&in_dev->rcu_head);
168 memcpy(&in_dev->cnf, dev->nd_net->ipv4.devconf_dflt, 168 memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
169 sizeof(in_dev->cnf)); 169 sizeof(in_dev->cnf));
170 in_dev->cnf.sysctl = NULL; 170 in_dev->cnf.sysctl = NULL;
171 in_dev->dev = dev; 171 in_dev->dev = dev;
@@ -437,7 +437,7 @@ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
437 437
438static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 438static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
439{ 439{
440 struct net *net = skb->sk->sk_net; 440 struct net *net = sock_net(skb->sk);
441 struct nlattr *tb[IFA_MAX+1]; 441 struct nlattr *tb[IFA_MAX+1];
442 struct in_device *in_dev; 442 struct in_device *in_dev;
443 struct ifaddrmsg *ifm; 443 struct ifaddrmsg *ifm;
@@ -446,9 +446,6 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
446 446
447 ASSERT_RTNL(); 447 ASSERT_RTNL();
448 448
449 if (net != &init_net)
450 return -EINVAL;
451
452 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy); 449 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
453 if (err < 0) 450 if (err < 0)
454 goto errout; 451 goto errout;
@@ -555,14 +552,11 @@ errout:
555 552
556static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 553static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
557{ 554{
558 struct net *net = skb->sk->sk_net; 555 struct net *net = sock_net(skb->sk);
559 struct in_ifaddr *ifa; 556 struct in_ifaddr *ifa;
560 557
561 ASSERT_RTNL(); 558 ASSERT_RTNL();
562 559
563 if (net != &init_net)
564 return -EINVAL;
565
566 ifa = rtm_to_ifaddr(net, nlh); 560 ifa = rtm_to_ifaddr(net, nlh);
567 if (IS_ERR(ifa)) 561 if (IS_ERR(ifa))
568 return PTR_ERR(ifa); 562 return PTR_ERR(ifa);
@@ -595,7 +589,7 @@ static __inline__ int inet_abc_len(__be32 addr)
595} 589}
596 590
597 591
598int devinet_ioctl(unsigned int cmd, void __user *arg) 592int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
599{ 593{
600 struct ifreq ifr; 594 struct ifreq ifr;
601 struct sockaddr_in sin_orig; 595 struct sockaddr_in sin_orig;
@@ -624,7 +618,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg)
624 *colon = 0; 618 *colon = 0;
625 619
626#ifdef CONFIG_KMOD 620#ifdef CONFIG_KMOD
627 dev_load(&init_net, ifr.ifr_name); 621 dev_load(net, ifr.ifr_name);
628#endif 622#endif
629 623
630 switch (cmd) { 624 switch (cmd) {
@@ -665,7 +659,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg)
665 rtnl_lock(); 659 rtnl_lock();
666 660
667 ret = -ENODEV; 661 ret = -ENODEV;
668 if ((dev = __dev_get_by_name(&init_net, ifr.ifr_name)) == NULL) 662 if ((dev = __dev_get_by_name(net, ifr.ifr_name)) == NULL)
669 goto done; 663 goto done;
670 664
671 if (colon) 665 if (colon)
@@ -878,6 +872,7 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
878{ 872{
879 __be32 addr = 0; 873 __be32 addr = 0;
880 struct in_device *in_dev; 874 struct in_device *in_dev;
875 struct net *net = dev_net(dev);
881 876
882 rcu_read_lock(); 877 rcu_read_lock();
883 in_dev = __in_dev_get_rcu(dev); 878 in_dev = __in_dev_get_rcu(dev);
@@ -906,7 +901,7 @@ no_in_dev:
906 */ 901 */
907 read_lock(&dev_base_lock); 902 read_lock(&dev_base_lock);
908 rcu_read_lock(); 903 rcu_read_lock();
909 for_each_netdev(&init_net, dev) { 904 for_each_netdev(net, dev) {
910 if ((in_dev = __in_dev_get_rcu(dev)) == NULL) 905 if ((in_dev = __in_dev_get_rcu(dev)) == NULL)
911 continue; 906 continue;
912 907
@@ -979,7 +974,7 @@ __be32 inet_confirm_addr(struct in_device *in_dev,
979 if (scope != RT_SCOPE_LINK) 974 if (scope != RT_SCOPE_LINK)
980 return confirm_addr_indev(in_dev, dst, local, scope); 975 return confirm_addr_indev(in_dev, dst, local, scope);
981 976
982 net = in_dev->dev->nd_net; 977 net = dev_net(in_dev->dev);
983 read_lock(&dev_base_lock); 978 read_lock(&dev_base_lock);
984 rcu_read_lock(); 979 rcu_read_lock();
985 for_each_netdev(net, dev) { 980 for_each_netdev(net, dev) {
@@ -1045,9 +1040,6 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1045 struct net_device *dev = ptr; 1040 struct net_device *dev = ptr;
1046 struct in_device *in_dev = __in_dev_get_rtnl(dev); 1041 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1047 1042
1048 if (dev->nd_net != &init_net)
1049 return NOTIFY_DONE;
1050
1051 ASSERT_RTNL(); 1043 ASSERT_RTNL();
1052 1044
1053 if (!in_dev) { 1045 if (!in_dev) {
@@ -1166,16 +1158,13 @@ nla_put_failure:
1166 1158
1167static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) 1159static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1168{ 1160{
1169 struct net *net = skb->sk->sk_net; 1161 struct net *net = sock_net(skb->sk);
1170 int idx, ip_idx; 1162 int idx, ip_idx;
1171 struct net_device *dev; 1163 struct net_device *dev;
1172 struct in_device *in_dev; 1164 struct in_device *in_dev;
1173 struct in_ifaddr *ifa; 1165 struct in_ifaddr *ifa;
1174 int s_ip_idx, s_idx = cb->args[0]; 1166 int s_ip_idx, s_idx = cb->args[0];
1175 1167
1176 if (net != &init_net)
1177 return 0;
1178
1179 s_ip_idx = ip_idx = cb->args[1]; 1168 s_ip_idx = ip_idx = cb->args[1];
1180 idx = 0; 1169 idx = 0;
1181 for_each_netdev(net, dev) { 1170 for_each_netdev(net, dev) {
@@ -1214,7 +1203,7 @@ static void rtmsg_ifa(int event, struct in_ifaddr* ifa, struct nlmsghdr *nlh,
1214 int err = -ENOBUFS; 1203 int err = -ENOBUFS;
1215 struct net *net; 1204 struct net *net;
1216 1205
1217 net = ifa->ifa_dev->dev->nd_net; 1206 net = dev_net(ifa->ifa_dev->dev);
1218 skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL); 1207 skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
1219 if (skb == NULL) 1208 if (skb == NULL)
1220 goto errout; 1209 goto errout;
@@ -1528,7 +1517,7 @@ static void devinet_sysctl_register(struct in_device *idev)
1528{ 1517{
1529 neigh_sysctl_register(idev->dev, idev->arp_parms, NET_IPV4, 1518 neigh_sysctl_register(idev->dev, idev->arp_parms, NET_IPV4,
1530 NET_IPV4_NEIGH, "ipv4", NULL, NULL); 1519 NET_IPV4_NEIGH, "ipv4", NULL, NULL);
1531 __devinet_sysctl_register(idev->dev->nd_net, idev->dev->name, 1520 __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
1532 idev->dev->ifindex, &idev->cnf); 1521 idev->dev->ifindex, &idev->cnf);
1533} 1522}
1534 1523
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 86ff2711fc95..0f1557a4ac7a 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -257,7 +257,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
257 if (in_dev == NULL) 257 if (in_dev == NULL)
258 goto e_inval; 258 goto e_inval;
259 259
260 net = dev->nd_net; 260 net = dev_net(dev);
261 if (fib_lookup(net, &fl, &res)) 261 if (fib_lookup(net, &fl, &res))
262 goto last_resort; 262 goto last_resort;
263 if (res.type != RTN_UNICAST) 263 if (res.type != RTN_UNICAST)
@@ -583,7 +583,7 @@ errout:
583 583
584static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) 584static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
585{ 585{
586 struct net *net = skb->sk->sk_net; 586 struct net *net = sock_net(skb->sk);
587 struct fib_config cfg; 587 struct fib_config cfg;
588 struct fib_table *tb; 588 struct fib_table *tb;
589 int err; 589 int err;
@@ -605,7 +605,7 @@ errout:
605 605
606static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) 606static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
607{ 607{
608 struct net *net = skb->sk->sk_net; 608 struct net *net = sock_net(skb->sk);
609 struct fib_config cfg; 609 struct fib_config cfg;
610 struct fib_table *tb; 610 struct fib_table *tb;
611 int err; 611 int err;
@@ -627,7 +627,7 @@ errout:
627 627
628static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) 628static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
629{ 629{
630 struct net *net = skb->sk->sk_net; 630 struct net *net = sock_net(skb->sk);
631 unsigned int h, s_h; 631 unsigned int h, s_h;
632 unsigned int e = 0, s_e; 632 unsigned int e = 0, s_e;
633 struct fib_table *tb; 633 struct fib_table *tb;
@@ -674,7 +674,7 @@ out:
674 674
675static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa) 675static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa)
676{ 676{
677 struct net *net = ifa->ifa_dev->dev->nd_net; 677 struct net *net = dev_net(ifa->ifa_dev->dev);
678 struct fib_table *tb; 678 struct fib_table *tb;
679 struct fib_config cfg = { 679 struct fib_config cfg = {
680 .fc_protocol = RTPROT_KERNEL, 680 .fc_protocol = RTPROT_KERNEL,
@@ -801,15 +801,15 @@ static void fib_del_ifaddr(struct in_ifaddr *ifa)
801 fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim); 801 fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim);
802 802
803 /* Check, that this local address finally disappeared. */ 803 /* Check, that this local address finally disappeared. */
804 if (inet_addr_type(dev->nd_net, ifa->ifa_local) != RTN_LOCAL) { 804 if (inet_addr_type(dev_net(dev), ifa->ifa_local) != RTN_LOCAL) {
805 /* And the last, but not the least thing. 805 /* And the last, but not the least thing.
806 We must flush stray FIB entries. 806 We must flush stray FIB entries.
807 807
808 First of all, we scan fib_info list searching 808 First of all, we scan fib_info list searching
809 for stray nexthop entries, then ignite fib_flush. 809 for stray nexthop entries, then ignite fib_flush.
810 */ 810 */
811 if (fib_sync_down_addr(dev->nd_net, ifa->ifa_local)) 811 if (fib_sync_down_addr(dev_net(dev), ifa->ifa_local))
812 fib_flush(dev->nd_net); 812 fib_flush(dev_net(dev));
813 } 813 }
814 } 814 }
815#undef LOCAL_OK 815#undef LOCAL_OK
@@ -857,7 +857,7 @@ static void nl_fib_input(struct sk_buff *skb)
857 struct fib_table *tb; 857 struct fib_table *tb;
858 u32 pid; 858 u32 pid;
859 859
860 net = skb->sk->sk_net; 860 net = sock_net(skb->sk);
861 nlh = nlmsg_hdr(skb); 861 nlh = nlmsg_hdr(skb);
862 if (skb->len < NLMSG_SPACE(0) || skb->len < nlh->nlmsg_len || 862 if (skb->len < NLMSG_SPACE(0) || skb->len < nlh->nlmsg_len ||
863 nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*frn))) 863 nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*frn)))
@@ -899,7 +899,7 @@ static void nl_fib_lookup_exit(struct net *net)
899static void fib_disable_ip(struct net_device *dev, int force) 899static void fib_disable_ip(struct net_device *dev, int force)
900{ 900{
901 if (fib_sync_down_dev(dev, force)) 901 if (fib_sync_down_dev(dev, force))
902 fib_flush(dev->nd_net); 902 fib_flush(dev_net(dev));
903 rt_cache_flush(0); 903 rt_cache_flush(0);
904 arp_ifdown(dev); 904 arp_ifdown(dev);
905} 905}
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 8d58d85dfac6..02088deb0461 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -821,7 +821,7 @@ static struct fib_alias *fib_get_first(struct seq_file *seq)
821 struct fib_table *main_table; 821 struct fib_table *main_table;
822 struct fn_hash *table; 822 struct fn_hash *table;
823 823
824 main_table = fib_get_table(iter->p.net, RT_TABLE_MAIN); 824 main_table = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
825 table = (struct fn_hash *)main_table->tb_data; 825 table = (struct fn_hash *)main_table->tb_data;
826 826
827 iter->bucket = 0; 827 iter->bucket = 0;
@@ -959,11 +959,10 @@ static struct fib_alias *fib_get_idx(struct seq_file *seq, loff_t pos)
959static void *fib_seq_start(struct seq_file *seq, loff_t *pos) 959static void *fib_seq_start(struct seq_file *seq, loff_t *pos)
960 __acquires(fib_hash_lock) 960 __acquires(fib_hash_lock)
961{ 961{
962 struct fib_iter_state *iter = seq->private;
963 void *v = NULL; 962 void *v = NULL;
964 963
965 read_lock(&fib_hash_lock); 964 read_lock(&fib_hash_lock);
966 if (fib_get_table(iter->p.net, RT_TABLE_MAIN)) 965 if (fib_get_table(seq_file_net(seq), RT_TABLE_MAIN))
967 v = *pos ? fib_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; 966 v = *pos ? fib_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
968 return v; 967 return v;
969} 968}
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 19274d01afa4..1fb56876be54 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -137,7 +137,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
137 struct nlmsghdr *nlh, struct fib_rule_hdr *frh, 137 struct nlmsghdr *nlh, struct fib_rule_hdr *frh,
138 struct nlattr **tb) 138 struct nlattr **tb)
139{ 139{
140 struct net *net = skb->sk->sk_net; 140 struct net *net = sock_net(skb->sk);
141 int err = -EINVAL; 141 int err = -EINVAL;
142 struct fib4_rule *rule4 = (struct fib4_rule *) rule; 142 struct fib4_rule *rule4 = (struct fib4_rule *) rule;
143 143
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index a13c84763d4c..3b83c34019fc 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -152,6 +152,7 @@ void free_fib_info(struct fib_info *fi)
152 nh->nh_dev = NULL; 152 nh->nh_dev = NULL;
153 } endfor_nexthops(fi); 153 } endfor_nexthops(fi);
154 fib_info_cnt--; 154 fib_info_cnt--;
155 release_net(fi->fib_net);
155 kfree(fi); 156 kfree(fi);
156} 157}
157 158
@@ -730,7 +731,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
730 goto failure; 731 goto failure;
731 fib_info_cnt++; 732 fib_info_cnt++;
732 733
733 fi->fib_net = net; 734 fi->fib_net = hold_net(net);
734 fi->fib_protocol = cfg->fc_protocol; 735 fi->fib_protocol = cfg->fc_protocol;
735 fi->fib_flags = cfg->fc_flags; 736 fi->fib_flags = cfg->fc_flags;
736 fi->fib_priority = cfg->fc_priority; 737 fi->fib_priority = cfg->fc_priority;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index f6cdc012eec5..ea294fffb9ce 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -122,7 +122,10 @@ struct tnode {
122 unsigned char bits; /* 2log(KEYLENGTH) bits needed */ 122 unsigned char bits; /* 2log(KEYLENGTH) bits needed */
123 unsigned int full_children; /* KEYLENGTH bits needed */ 123 unsigned int full_children; /* KEYLENGTH bits needed */
124 unsigned int empty_children; /* KEYLENGTH bits needed */ 124 unsigned int empty_children; /* KEYLENGTH bits needed */
125 struct rcu_head rcu; 125 union {
126 struct rcu_head rcu;
127 struct work_struct work;
128 };
126 struct node *child[0]; 129 struct node *child[0];
127}; 130};
128 131
@@ -160,7 +163,6 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
160static struct node *resize(struct trie *t, struct tnode *tn); 163static struct node *resize(struct trie *t, struct tnode *tn);
161static struct tnode *inflate(struct trie *t, struct tnode *tn); 164static struct tnode *inflate(struct trie *t, struct tnode *tn);
162static struct tnode *halve(struct trie *t, struct tnode *tn); 165static struct tnode *halve(struct trie *t, struct tnode *tn);
163static void tnode_free(struct tnode *tn);
164 166
165static struct kmem_cache *fn_alias_kmem __read_mostly; 167static struct kmem_cache *fn_alias_kmem __read_mostly;
166static struct kmem_cache *trie_leaf_kmem __read_mostly; 168static struct kmem_cache *trie_leaf_kmem __read_mostly;
@@ -334,6 +336,11 @@ static void __leaf_free_rcu(struct rcu_head *head)
334 kmem_cache_free(trie_leaf_kmem, l); 336 kmem_cache_free(trie_leaf_kmem, l);
335} 337}
336 338
339static inline void free_leaf(struct leaf *l)
340{
341 call_rcu_bh(&l->rcu, __leaf_free_rcu);
342}
343
337static void __leaf_info_free_rcu(struct rcu_head *head) 344static void __leaf_info_free_rcu(struct rcu_head *head)
338{ 345{
339 kfree(container_of(head, struct leaf_info, rcu)); 346 kfree(container_of(head, struct leaf_info, rcu));
@@ -346,16 +353,16 @@ static inline void free_leaf_info(struct leaf_info *leaf)
346 353
347static struct tnode *tnode_alloc(size_t size) 354static struct tnode *tnode_alloc(size_t size)
348{ 355{
349 struct page *pages;
350
351 if (size <= PAGE_SIZE) 356 if (size <= PAGE_SIZE)
352 return kzalloc(size, GFP_KERNEL); 357 return kzalloc(size, GFP_KERNEL);
358 else
359 return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
360}
353 361
354 pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size)); 362static void __tnode_vfree(struct work_struct *arg)
355 if (!pages) 363{
356 return NULL; 364 struct tnode *tn = container_of(arg, struct tnode, work);
357 365 vfree(tn);
358 return page_address(pages);
359} 366}
360 367
361static void __tnode_free_rcu(struct rcu_head *head) 368static void __tnode_free_rcu(struct rcu_head *head)
@@ -366,16 +373,17 @@ static void __tnode_free_rcu(struct rcu_head *head)
366 373
367 if (size <= PAGE_SIZE) 374 if (size <= PAGE_SIZE)
368 kfree(tn); 375 kfree(tn);
369 else 376 else {
370 free_pages((unsigned long)tn, get_order(size)); 377 INIT_WORK(&tn->work, __tnode_vfree);
378 schedule_work(&tn->work);
379 }
371} 380}
372 381
373static inline void tnode_free(struct tnode *tn) 382static inline void tnode_free(struct tnode *tn)
374{ 383{
375 if (IS_LEAF(tn)) { 384 if (IS_LEAF(tn))
376 struct leaf *l = (struct leaf *) tn; 385 free_leaf((struct leaf *) tn);
377 call_rcu_bh(&l->rcu, __leaf_free_rcu); 386 else
378 } else
379 call_rcu(&tn->rcu, __tnode_free_rcu); 387 call_rcu(&tn->rcu, __tnode_free_rcu);
380} 388}
381 389
@@ -1086,7 +1094,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
1086 li = leaf_info_new(plen); 1094 li = leaf_info_new(plen);
1087 1095
1088 if (!li) { 1096 if (!li) {
1089 tnode_free((struct tnode *) l); 1097 free_leaf(l);
1090 return NULL; 1098 return NULL;
1091 } 1099 }
1092 1100
@@ -1122,7 +1130,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
1122 1130
1123 if (!tn) { 1131 if (!tn) {
1124 free_leaf_info(li); 1132 free_leaf_info(li);
1125 tnode_free((struct tnode *) l); 1133 free_leaf(l);
1126 return NULL; 1134 return NULL;
1127 } 1135 }
1128 1136
@@ -1578,7 +1586,7 @@ static void trie_leaf_remove(struct trie *t, struct leaf *l)
1578 } else 1586 } else
1579 rcu_assign_pointer(t->trie, NULL); 1587 rcu_assign_pointer(t->trie, NULL);
1580 1588
1581 tnode_free((struct tnode *) l); 1589 free_leaf(l);
1582} 1590}
1583 1591
1584/* 1592/*
@@ -1665,7 +1673,7 @@ static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg)
1665 return 0; 1673 return 0;
1666} 1674}
1667 1675
1668static int trie_flush_list(struct trie *t, struct list_head *head) 1676static int trie_flush_list(struct list_head *head)
1669{ 1677{
1670 struct fib_alias *fa, *fa_node; 1678 struct fib_alias *fa, *fa_node;
1671 int found = 0; 1679 int found = 0;
@@ -1683,7 +1691,7 @@ static int trie_flush_list(struct trie *t, struct list_head *head)
1683 return found; 1691 return found;
1684} 1692}
1685 1693
1686static int trie_flush_leaf(struct trie *t, struct leaf *l) 1694static int trie_flush_leaf(struct leaf *l)
1687{ 1695{
1688 int found = 0; 1696 int found = 0;
1689 struct hlist_head *lih = &l->list; 1697 struct hlist_head *lih = &l->list;
@@ -1691,7 +1699,7 @@ static int trie_flush_leaf(struct trie *t, struct leaf *l)
1691 struct leaf_info *li = NULL; 1699 struct leaf_info *li = NULL;
1692 1700
1693 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) { 1701 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
1694 found += trie_flush_list(t, &li->falh); 1702 found += trie_flush_list(&li->falh);
1695 1703
1696 if (list_empty(&li->falh)) { 1704 if (list_empty(&li->falh)) {
1697 hlist_del_rcu(&li->hlist); 1705 hlist_del_rcu(&li->hlist);
@@ -1782,7 +1790,7 @@ static int fn_trie_flush(struct fib_table *tb)
1782 int found = 0; 1790 int found = 0;
1783 1791
1784 for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) { 1792 for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
1785 found += trie_flush_leaf(t, l); 1793 found += trie_flush_leaf(l);
1786 1794
1787 if (ll && hlist_empty(&ll->list)) 1795 if (ll && hlist_empty(&ll->list))
1788 trie_leaf_remove(t, ll); 1796 trie_leaf_remove(t, ll);
@@ -2029,9 +2037,8 @@ struct fib_table *fib_hash_table(u32 id)
2029/* Depth first Trie walk iterator */ 2037/* Depth first Trie walk iterator */
2030struct fib_trie_iter { 2038struct fib_trie_iter {
2031 struct seq_net_private p; 2039 struct seq_net_private p;
2032 struct trie *trie_local, *trie_main; 2040 struct fib_table *tb;
2033 struct tnode *tnode; 2041 struct tnode *tnode;
2034 struct trie *trie;
2035 unsigned index; 2042 unsigned index;
2036 unsigned depth; 2043 unsigned depth;
2037}; 2044};
@@ -2084,31 +2091,26 @@ rescan:
2084static struct node *fib_trie_get_first(struct fib_trie_iter *iter, 2091static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
2085 struct trie *t) 2092 struct trie *t)
2086{ 2093{
2087 struct node *n ; 2094 struct node *n;
2088 2095
2089 if (!t) 2096 if (!t)
2090 return NULL; 2097 return NULL;
2091 2098
2092 n = rcu_dereference(t->trie); 2099 n = rcu_dereference(t->trie);
2093 2100 if (!n)
2094 if (!iter)
2095 return NULL; 2101 return NULL;
2096 2102
2097 if (n) { 2103 if (IS_TNODE(n)) {
2098 if (IS_TNODE(n)) { 2104 iter->tnode = (struct tnode *) n;
2099 iter->tnode = (struct tnode *) n; 2105 iter->index = 0;
2100 iter->trie = t; 2106 iter->depth = 1;
2101 iter->index = 0; 2107 } else {
2102 iter->depth = 1; 2108 iter->tnode = NULL;
2103 } else { 2109 iter->index = 0;
2104 iter->tnode = NULL; 2110 iter->depth = 0;
2105 iter->trie = t;
2106 iter->index = 0;
2107 iter->depth = 0;
2108 }
2109 return n;
2110 } 2111 }
2111 return NULL; 2112
2113 return n;
2112} 2114}
2113 2115
2114static void trie_collect_stats(struct trie *t, struct trie_stat *s) 2116static void trie_collect_stats(struct trie *t, struct trie_stat *s)
@@ -2119,8 +2121,7 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s)
2119 memset(s, 0, sizeof(*s)); 2121 memset(s, 0, sizeof(*s));
2120 2122
2121 rcu_read_lock(); 2123 rcu_read_lock();
2122 for (n = fib_trie_get_first(&iter, t); n; 2124 for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
2123 n = fib_trie_get_next(&iter)) {
2124 if (IS_LEAF(n)) { 2125 if (IS_LEAF(n)) {
2125 struct leaf *l = (struct leaf *)n; 2126 struct leaf *l = (struct leaf *)n;
2126 struct leaf_info *li; 2127 struct leaf_info *li;
@@ -2209,36 +2210,48 @@ static void trie_show_usage(struct seq_file *seq,
2209} 2210}
2210#endif /* CONFIG_IP_FIB_TRIE_STATS */ 2211#endif /* CONFIG_IP_FIB_TRIE_STATS */
2211 2212
2212static void fib_trie_show(struct seq_file *seq, const char *name, 2213static void fib_table_print(struct seq_file *seq, struct fib_table *tb)
2213 struct trie *trie)
2214{ 2214{
2215 struct trie_stat stat; 2215 if (tb->tb_id == RT_TABLE_LOCAL)
2216 2216 seq_puts(seq, "Local:\n");
2217 trie_collect_stats(trie, &stat); 2217 else if (tb->tb_id == RT_TABLE_MAIN)
2218 seq_printf(seq, "%s:\n", name); 2218 seq_puts(seq, "Main:\n");
2219 trie_show_stats(seq, &stat); 2219 else
2220#ifdef CONFIG_IP_FIB_TRIE_STATS 2220 seq_printf(seq, "Id %d:\n", tb->tb_id);
2221 trie_show_usage(seq, &trie->stats);
2222#endif
2223} 2221}
2224 2222
2223
2225static int fib_triestat_seq_show(struct seq_file *seq, void *v) 2224static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2226{ 2225{
2227 struct net *net = (struct net *)seq->private; 2226 struct net *net = (struct net *)seq->private;
2228 struct fib_table *tb; 2227 unsigned int h;
2229 2228
2230 seq_printf(seq, 2229 seq_printf(seq,
2231 "Basic info: size of leaf:" 2230 "Basic info: size of leaf:"
2232 " %Zd bytes, size of tnode: %Zd bytes.\n", 2231 " %Zd bytes, size of tnode: %Zd bytes.\n",
2233 sizeof(struct leaf), sizeof(struct tnode)); 2232 sizeof(struct leaf), sizeof(struct tnode));
2234 2233
2235 tb = fib_get_table(net, RT_TABLE_LOCAL); 2234 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
2236 if (tb) 2235 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2237 fib_trie_show(seq, "Local", (struct trie *) tb->tb_data); 2236 struct hlist_node *node;
2237 struct fib_table *tb;
2238
2239 hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
2240 struct trie *t = (struct trie *) tb->tb_data;
2241 struct trie_stat stat;
2242
2243 if (!t)
2244 continue;
2238 2245
2239 tb = fib_get_table(net, RT_TABLE_MAIN); 2246 fib_table_print(seq, tb);
2240 if (tb) 2247
2241 fib_trie_show(seq, "Main", (struct trie *) tb->tb_data); 2248 trie_collect_stats(t, &stat);
2249 trie_show_stats(seq, &stat);
2250#ifdef CONFIG_IP_FIB_TRIE_STATS
2251 trie_show_usage(seq, &t->stats);
2252#endif
2253 }
2254 }
2242 2255
2243 return 0; 2256 return 0;
2244} 2257}
@@ -2274,67 +2287,79 @@ static const struct file_operations fib_triestat_fops = {
2274 .release = fib_triestat_seq_release, 2287 .release = fib_triestat_seq_release,
2275}; 2288};
2276 2289
2277static struct node *fib_trie_get_idx(struct fib_trie_iter *iter, 2290static struct node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
2278 loff_t pos)
2279{ 2291{
2292 struct fib_trie_iter *iter = seq->private;
2293 struct net *net = seq_file_net(seq);
2280 loff_t idx = 0; 2294 loff_t idx = 0;
2281 struct node *n; 2295 unsigned int h;
2282 2296
2283 for (n = fib_trie_get_first(iter, iter->trie_local); 2297 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
2284 n; ++idx, n = fib_trie_get_next(iter)) { 2298 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2285 if (pos == idx) 2299 struct hlist_node *node;
2286 return n; 2300 struct fib_table *tb;
2287 }
2288 2301
2289 for (n = fib_trie_get_first(iter, iter->trie_main); 2302 hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
2290 n; ++idx, n = fib_trie_get_next(iter)) { 2303 struct node *n;
2291 if (pos == idx) 2304
2292 return n; 2305 for (n = fib_trie_get_first(iter,
2306 (struct trie *) tb->tb_data);
2307 n; n = fib_trie_get_next(iter))
2308 if (pos == idx++) {
2309 iter->tb = tb;
2310 return n;
2311 }
2312 }
2293 } 2313 }
2314
2294 return NULL; 2315 return NULL;
2295} 2316}
2296 2317
2297static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos) 2318static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
2298 __acquires(RCU) 2319 __acquires(RCU)
2299{ 2320{
2300 struct fib_trie_iter *iter = seq->private;
2301 struct fib_table *tb;
2302
2303 if (!iter->trie_local) {
2304 tb = fib_get_table(iter->p.net, RT_TABLE_LOCAL);
2305 if (tb)
2306 iter->trie_local = (struct trie *) tb->tb_data;
2307 }
2308 if (!iter->trie_main) {
2309 tb = fib_get_table(iter->p.net, RT_TABLE_MAIN);
2310 if (tb)
2311 iter->trie_main = (struct trie *) tb->tb_data;
2312 }
2313 rcu_read_lock(); 2321 rcu_read_lock();
2314 if (*pos == 0) 2322 return fib_trie_get_idx(seq, *pos);
2315 return SEQ_START_TOKEN;
2316 return fib_trie_get_idx(iter, *pos - 1);
2317} 2323}
2318 2324
2319static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2325static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2320{ 2326{
2321 struct fib_trie_iter *iter = seq->private; 2327 struct fib_trie_iter *iter = seq->private;
2322 void *l = v; 2328 struct net *net = seq_file_net(seq);
2329 struct fib_table *tb = iter->tb;
2330 struct hlist_node *tb_node;
2331 unsigned int h;
2332 struct node *n;
2323 2333
2324 ++*pos; 2334 ++*pos;
2325 if (v == SEQ_START_TOKEN) 2335 /* next node in same table */
2326 return fib_trie_get_idx(iter, 0); 2336 n = fib_trie_get_next(iter);
2327 2337 if (n)
2328 v = fib_trie_get_next(iter); 2338 return n;
2329 BUG_ON(v == l);
2330 if (v)
2331 return v;
2332 2339
2333 /* continue scan in next trie */ 2340 /* walk rest of this hash chain */
2334 if (iter->trie == iter->trie_local) 2341 h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
2335 return fib_trie_get_first(iter, iter->trie_main); 2342 while ( (tb_node = rcu_dereference(tb->tb_hlist.next)) ) {
2343 tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
2344 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2345 if (n)
2346 goto found;
2347 }
2336 2348
2349 /* new hash chain */
2350 while (++h < FIB_TABLE_HASHSZ) {
2351 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2352 hlist_for_each_entry_rcu(tb, tb_node, head, tb_hlist) {
2353 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2354 if (n)
2355 goto found;
2356 }
2357 }
2337 return NULL; 2358 return NULL;
2359
2360found:
2361 iter->tb = tb;
2362 return n;
2338} 2363}
2339 2364
2340static void fib_trie_seq_stop(struct seq_file *seq, void *v) 2365static void fib_trie_seq_stop(struct seq_file *seq, void *v)
@@ -2391,22 +2416,15 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
2391 const struct fib_trie_iter *iter = seq->private; 2416 const struct fib_trie_iter *iter = seq->private;
2392 struct node *n = v; 2417 struct node *n = v;
2393 2418
2394 if (v == SEQ_START_TOKEN) 2419 if (!node_parent_rcu(n))
2395 return 0; 2420 fib_table_print(seq, iter->tb);
2396
2397 if (!node_parent_rcu(n)) {
2398 if (iter->trie == iter->trie_local)
2399 seq_puts(seq, "<local>:\n");
2400 else
2401 seq_puts(seq, "<main>:\n");
2402 }
2403 2421
2404 if (IS_TNODE(n)) { 2422 if (IS_TNODE(n)) {
2405 struct tnode *tn = (struct tnode *) n; 2423 struct tnode *tn = (struct tnode *) n;
2406 __be32 prf = htonl(mask_pfx(tn->key, tn->pos)); 2424 __be32 prf = htonl(mask_pfx(tn->key, tn->pos));
2407 2425
2408 seq_indent(seq, iter->depth-1); 2426 seq_indent(seq, iter->depth-1);
2409 seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n", 2427 seq_printf(seq, " +-- " NIPQUAD_FMT "/%d %d %d %d\n",
2410 NIPQUAD(prf), tn->pos, tn->bits, tn->full_children, 2428 NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
2411 tn->empty_children); 2429 tn->empty_children);
2412 2430
@@ -2417,7 +2435,7 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
2417 __be32 val = htonl(l->key); 2435 __be32 val = htonl(l->key);
2418 2436
2419 seq_indent(seq, iter->depth); 2437 seq_indent(seq, iter->depth);
2420 seq_printf(seq, " |-- %d.%d.%d.%d\n", NIPQUAD(val)); 2438 seq_printf(seq, " |-- " NIPQUAD_FMT "\n", NIPQUAD(val));
2421 2439
2422 hlist_for_each_entry_rcu(li, node, &l->list, hlist) { 2440 hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
2423 struct fib_alias *fa; 2441 struct fib_alias *fa;
@@ -2502,7 +2520,7 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
2502 struct fib_table *tb; 2520 struct fib_table *tb;
2503 2521
2504 rcu_read_lock(); 2522 rcu_read_lock();
2505 tb = fib_get_table(iter->p.net, RT_TABLE_MAIN); 2523 tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
2506 if (!tb) 2524 if (!tb)
2507 return NULL; 2525 return NULL;
2508 2526
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 40508babad8c..f064031f2031 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -93,6 +93,7 @@
93#include <asm/uaccess.h> 93#include <asm/uaccess.h>
94#include <net/checksum.h> 94#include <net/checksum.h>
95#include <net/xfrm.h> 95#include <net/xfrm.h>
96#include <net/inet_common.h>
96 97
97/* 98/*
98 * Build xmit assembly blocks 99 * Build xmit assembly blocks
@@ -188,29 +189,6 @@ struct icmp_err icmp_err_convert[] = {
188 }, 189 },
189}; 190};
190 191
191/* Control parameters for ECHO replies. */
192int sysctl_icmp_echo_ignore_all __read_mostly;
193int sysctl_icmp_echo_ignore_broadcasts __read_mostly = 1;
194
195/* Control parameter - ignore bogus broadcast responses? */
196int sysctl_icmp_ignore_bogus_error_responses __read_mostly = 1;
197
198/*
199 * Configurable global rate limit.
200 *
201 * ratelimit defines tokens/packet consumed for dst->rate_token bucket
202 * ratemask defines which icmp types are ratelimited by setting
203 * it's bit position.
204 *
205 * default:
206 * dest unreachable (3), source quench (4),
207 * time exceeded (11), parameter problem (12)
208 */
209
210int sysctl_icmp_ratelimit __read_mostly = 1 * HZ;
211int sysctl_icmp_ratemask __read_mostly = 0x1818;
212int sysctl_icmp_errors_use_inbound_ifaddr __read_mostly;
213
214/* 192/*
215 * ICMP control array. This specifies what to do with each ICMP. 193 * ICMP control array. This specifies what to do with each ICMP.
216 */ 194 */
@@ -229,14 +207,16 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
229 * 207 *
230 * On SMP we have one ICMP socket per-cpu. 208 * On SMP we have one ICMP socket per-cpu.
231 */ 209 */
232static DEFINE_PER_CPU(struct socket *, __icmp_socket) = NULL; 210static struct sock *icmp_sk(struct net *net)
233#define icmp_socket __get_cpu_var(__icmp_socket) 211{
212 return net->ipv4.icmp_sk[smp_processor_id()];
213}
234 214
235static inline int icmp_xmit_lock(void) 215static inline int icmp_xmit_lock(struct sock *sk)
236{ 216{
237 local_bh_disable(); 217 local_bh_disable();
238 218
239 if (unlikely(!spin_trylock(&icmp_socket->sk->sk_lock.slock))) { 219 if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
240 /* This can happen if the output path signals a 220 /* This can happen if the output path signals a
241 * dst_link_failure() for an outgoing ICMP packet. 221 * dst_link_failure() for an outgoing ICMP packet.
242 */ 222 */
@@ -246,9 +226,9 @@ static inline int icmp_xmit_lock(void)
246 return 0; 226 return 0;
247} 227}
248 228
249static inline void icmp_xmit_unlock(void) 229static inline void icmp_xmit_unlock(struct sock *sk)
250{ 230{
251 spin_unlock_bh(&icmp_socket->sk->sk_lock.slock); 231 spin_unlock_bh(&sk->sk_lock.slock);
252} 232}
253 233
254/* 234/*
@@ -291,7 +271,8 @@ int xrlim_allow(struct dst_entry *dst, int timeout)
291 return rc; 271 return rc;
292} 272}
293 273
294static inline int icmpv4_xrlim_allow(struct rtable *rt, int type, int code) 274static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
275 int type, int code)
295{ 276{
296 struct dst_entry *dst = &rt->u.dst; 277 struct dst_entry *dst = &rt->u.dst;
297 int rc = 1; 278 int rc = 1;
@@ -308,8 +289,8 @@ static inline int icmpv4_xrlim_allow(struct rtable *rt, int type, int code)
308 goto out; 289 goto out;
309 290
310 /* Limit if icmp type is enabled in ratemask. */ 291 /* Limit if icmp type is enabled in ratemask. */
311 if ((1 << type) & sysctl_icmp_ratemask) 292 if ((1 << type) & net->ipv4.sysctl_icmp_ratemask)
312 rc = xrlim_allow(dst, sysctl_icmp_ratelimit); 293 rc = xrlim_allow(dst, net->ipv4.sysctl_icmp_ratelimit);
313out: 294out:
314 return rc; 295 return rc;
315} 296}
@@ -346,19 +327,21 @@ static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
346static void icmp_push_reply(struct icmp_bxm *icmp_param, 327static void icmp_push_reply(struct icmp_bxm *icmp_param,
347 struct ipcm_cookie *ipc, struct rtable *rt) 328 struct ipcm_cookie *ipc, struct rtable *rt)
348{ 329{
330 struct sock *sk;
349 struct sk_buff *skb; 331 struct sk_buff *skb;
350 332
351 if (ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param, 333 sk = icmp_sk(dev_net(rt->u.dst.dev));
334 if (ip_append_data(sk, icmp_glue_bits, icmp_param,
352 icmp_param->data_len+icmp_param->head_len, 335 icmp_param->data_len+icmp_param->head_len,
353 icmp_param->head_len, 336 icmp_param->head_len,
354 ipc, rt, MSG_DONTWAIT) < 0) 337 ipc, rt, MSG_DONTWAIT) < 0)
355 ip_flush_pending_frames(icmp_socket->sk); 338 ip_flush_pending_frames(sk);
356 else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) { 339 else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
357 struct icmphdr *icmph = icmp_hdr(skb); 340 struct icmphdr *icmph = icmp_hdr(skb);
358 __wsum csum = 0; 341 __wsum csum = 0;
359 struct sk_buff *skb1; 342 struct sk_buff *skb1;
360 343
361 skb_queue_walk(&icmp_socket->sk->sk_write_queue, skb1) { 344 skb_queue_walk(&sk->sk_write_queue, skb1) {
362 csum = csum_add(csum, skb1->csum); 345 csum = csum_add(csum, skb1->csum);
363 } 346 }
364 csum = csum_partial_copy_nocheck((void *)&icmp_param->data, 347 csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
@@ -366,7 +349,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
366 icmp_param->head_len, csum); 349 icmp_param->head_len, csum);
367 icmph->checksum = csum_fold(csum); 350 icmph->checksum = csum_fold(csum);
368 skb->ip_summed = CHECKSUM_NONE; 351 skb->ip_summed = CHECKSUM_NONE;
369 ip_push_pending_frames(icmp_socket->sk); 352 ip_push_pending_frames(sk);
370 } 353 }
371} 354}
372 355
@@ -376,16 +359,17 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
376 359
377static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) 360static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
378{ 361{
379 struct sock *sk = icmp_socket->sk;
380 struct inet_sock *inet = inet_sk(sk);
381 struct ipcm_cookie ipc; 362 struct ipcm_cookie ipc;
382 struct rtable *rt = (struct rtable *)skb->dst; 363 struct rtable *rt = skb->rtable;
364 struct net *net = dev_net(rt->u.dst.dev);
365 struct sock *sk = icmp_sk(net);
366 struct inet_sock *inet = inet_sk(sk);
383 __be32 daddr; 367 __be32 daddr;
384 368
385 if (ip_options_echo(&icmp_param->replyopts, skb)) 369 if (ip_options_echo(&icmp_param->replyopts, skb))
386 return; 370 return;
387 371
388 if (icmp_xmit_lock()) 372 if (icmp_xmit_lock(sk))
389 return; 373 return;
390 374
391 icmp_param->data.icmph.checksum = 0; 375 icmp_param->data.icmph.checksum = 0;
@@ -405,15 +389,15 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
405 .tos = RT_TOS(ip_hdr(skb)->tos) } }, 389 .tos = RT_TOS(ip_hdr(skb)->tos) } },
406 .proto = IPPROTO_ICMP }; 390 .proto = IPPROTO_ICMP };
407 security_skb_classify_flow(skb, &fl); 391 security_skb_classify_flow(skb, &fl);
408 if (ip_route_output_key(rt->u.dst.dev->nd_net, &rt, &fl)) 392 if (ip_route_output_key(net, &rt, &fl))
409 goto out_unlock; 393 goto out_unlock;
410 } 394 }
411 if (icmpv4_xrlim_allow(rt, icmp_param->data.icmph.type, 395 if (icmpv4_xrlim_allow(net, rt, icmp_param->data.icmph.type,
412 icmp_param->data.icmph.code)) 396 icmp_param->data.icmph.code))
413 icmp_push_reply(icmp_param, &ipc, rt); 397 icmp_push_reply(icmp_param, &ipc, rt);
414 ip_rt_put(rt); 398 ip_rt_put(rt);
415out_unlock: 399out_unlock:
416 icmp_xmit_unlock(); 400 icmp_xmit_unlock(sk);
417} 401}
418 402
419 403
@@ -433,15 +417,17 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
433 struct iphdr *iph; 417 struct iphdr *iph;
434 int room; 418 int room;
435 struct icmp_bxm icmp_param; 419 struct icmp_bxm icmp_param;
436 struct rtable *rt = (struct rtable *)skb_in->dst; 420 struct rtable *rt = skb_in->rtable;
437 struct ipcm_cookie ipc; 421 struct ipcm_cookie ipc;
438 __be32 saddr; 422 __be32 saddr;
439 u8 tos; 423 u8 tos;
440 struct net *net; 424 struct net *net;
425 struct sock *sk;
441 426
442 if (!rt) 427 if (!rt)
443 goto out; 428 goto out;
444 net = rt->u.dst.dev->nd_net; 429 net = dev_net(rt->u.dst.dev);
430 sk = icmp_sk(net);
445 431
446 /* 432 /*
447 * Find the original header. It is expected to be valid, of course. 433 * Find the original header. It is expected to be valid, of course.
@@ -505,7 +491,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
505 } 491 }
506 } 492 }
507 493
508 if (icmp_xmit_lock()) 494 if (icmp_xmit_lock(sk))
509 return; 495 return;
510 496
511 /* 497 /*
@@ -516,7 +502,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
516 if (!(rt->rt_flags & RTCF_LOCAL)) { 502 if (!(rt->rt_flags & RTCF_LOCAL)) {
517 struct net_device *dev = NULL; 503 struct net_device *dev = NULL;
518 504
519 if (rt->fl.iif && sysctl_icmp_errors_use_inbound_ifaddr) 505 if (rt->fl.iif &&
506 net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
520 dev = dev_get_by_index(net, rt->fl.iif); 507 dev = dev_get_by_index(net, rt->fl.iif);
521 508
522 if (dev) { 509 if (dev) {
@@ -544,7 +531,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
544 icmp_param.data.icmph.checksum = 0; 531 icmp_param.data.icmph.checksum = 0;
545 icmp_param.skb = skb_in; 532 icmp_param.skb = skb_in;
546 icmp_param.offset = skb_network_offset(skb_in); 533 icmp_param.offset = skb_network_offset(skb_in);
547 inet_sk(icmp_socket->sk)->tos = tos; 534 inet_sk(sk)->tos = tos;
548 ipc.addr = iph->saddr; 535 ipc.addr = iph->saddr;
549 ipc.opt = &icmp_param.replyopts; 536 ipc.opt = &icmp_param.replyopts;
550 537
@@ -609,7 +596,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
609 RT_TOS(tos), rt2->u.dst.dev); 596 RT_TOS(tos), rt2->u.dst.dev);
610 597
611 dst_release(&rt2->u.dst); 598 dst_release(&rt2->u.dst);
612 rt2 = (struct rtable *)skb_in->dst; 599 rt2 = skb_in->rtable;
613 skb_in->dst = odst; 600 skb_in->dst = odst;
614 } 601 }
615 602
@@ -634,7 +621,7 @@ relookup_failed:
634 } 621 }
635 622
636route_done: 623route_done:
637 if (!icmpv4_xrlim_allow(rt, type, code)) 624 if (!icmpv4_xrlim_allow(net, rt, type, code))
638 goto ende; 625 goto ende;
639 626
640 /* RFC says return as much as we can without exceeding 576 bytes. */ 627 /* RFC says return as much as we can without exceeding 576 bytes. */
@@ -654,7 +641,7 @@ route_done:
654ende: 641ende:
655 ip_rt_put(rt); 642 ip_rt_put(rt);
656out_unlock: 643out_unlock:
657 icmp_xmit_unlock(); 644 icmp_xmit_unlock(sk);
658out:; 645out:;
659} 646}
660 647
@@ -672,7 +659,7 @@ static void icmp_unreach(struct sk_buff *skb)
672 u32 info = 0; 659 u32 info = 0;
673 struct net *net; 660 struct net *net;
674 661
675 net = skb->dst->dev->nd_net; 662 net = dev_net(skb->dst->dev);
676 663
677 /* 664 /*
678 * Incomplete header ? 665 * Incomplete header ?
@@ -698,7 +685,7 @@ static void icmp_unreach(struct sk_buff *skb)
698 break; 685 break;
699 case ICMP_FRAG_NEEDED: 686 case ICMP_FRAG_NEEDED:
700 if (ipv4_config.no_pmtu_disc) { 687 if (ipv4_config.no_pmtu_disc) {
701 LIMIT_NETDEBUG(KERN_INFO "ICMP: %u.%u.%u.%u: " 688 LIMIT_NETDEBUG(KERN_INFO "ICMP: " NIPQUAD_FMT ": "
702 "fragmentation needed " 689 "fragmentation needed "
703 "and DF set.\n", 690 "and DF set.\n",
704 NIPQUAD(iph->daddr)); 691 NIPQUAD(iph->daddr));
@@ -710,7 +697,7 @@ static void icmp_unreach(struct sk_buff *skb)
710 } 697 }
711 break; 698 break;
712 case ICMP_SR_FAILED: 699 case ICMP_SR_FAILED:
713 LIMIT_NETDEBUG(KERN_INFO "ICMP: %u.%u.%u.%u: Source " 700 LIMIT_NETDEBUG(KERN_INFO "ICMP: " NIPQUAD_FMT ": Source "
714 "Route Failed.\n", 701 "Route Failed.\n",
715 NIPQUAD(iph->daddr)); 702 NIPQUAD(iph->daddr));
716 break; 703 break;
@@ -740,12 +727,12 @@ static void icmp_unreach(struct sk_buff *skb)
740 * get the other vendor to fix their kit. 727 * get the other vendor to fix their kit.
741 */ 728 */
742 729
743 if (!sysctl_icmp_ignore_bogus_error_responses && 730 if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
744 inet_addr_type(net, iph->daddr) == RTN_BROADCAST) { 731 inet_addr_type(net, iph->daddr) == RTN_BROADCAST) {
745 if (net_ratelimit()) 732 if (net_ratelimit())
746 printk(KERN_WARNING "%u.%u.%u.%u sent an invalid ICMP " 733 printk(KERN_WARNING NIPQUAD_FMT " sent an invalid ICMP "
747 "type %u, code %u " 734 "type %u, code %u "
748 "error to a broadcast: %u.%u.%u.%u on %s\n", 735 "error to a broadcast: " NIPQUAD_FMT " on %s\n",
749 NIPQUAD(ip_hdr(skb)->saddr), 736 NIPQUAD(ip_hdr(skb)->saddr),
750 icmph->type, icmph->code, 737 icmph->type, icmph->code,
751 NIPQUAD(iph->daddr), 738 NIPQUAD(iph->daddr),
@@ -835,7 +822,10 @@ out_err:
835 822
836static void icmp_echo(struct sk_buff *skb) 823static void icmp_echo(struct sk_buff *skb)
837{ 824{
838 if (!sysctl_icmp_echo_ignore_all) { 825 struct net *net;
826
827 net = dev_net(skb->dst->dev);
828 if (!net->ipv4.sysctl_icmp_echo_ignore_all) {
839 struct icmp_bxm icmp_param; 829 struct icmp_bxm icmp_param;
840 830
841 icmp_param.data.icmph = *icmp_hdr(skb); 831 icmp_param.data.icmph = *icmp_hdr(skb);
@@ -938,7 +928,7 @@ static void icmp_address(struct sk_buff *skb)
938 928
939static void icmp_address_reply(struct sk_buff *skb) 929static void icmp_address_reply(struct sk_buff *skb)
940{ 930{
941 struct rtable *rt = (struct rtable *)skb->dst; 931 struct rtable *rt = skb->rtable;
942 struct net_device *dev = skb->dev; 932 struct net_device *dev = skb->dev;
943 struct in_device *in_dev; 933 struct in_device *in_dev;
944 struct in_ifaddr *ifa; 934 struct in_ifaddr *ifa;
@@ -963,8 +953,8 @@ static void icmp_address_reply(struct sk_buff *skb)
963 break; 953 break;
964 } 954 }
965 if (!ifa && net_ratelimit()) { 955 if (!ifa && net_ratelimit()) {
966 printk(KERN_INFO "Wrong address mask %u.%u.%u.%u from " 956 printk(KERN_INFO "Wrong address mask " NIPQUAD_FMT " from "
967 "%s/%u.%u.%u.%u\n", 957 "%s/" NIPQUAD_FMT "\n",
968 NIPQUAD(*mp), dev->name, NIPQUAD(rt->rt_src)); 958 NIPQUAD(*mp), dev->name, NIPQUAD(rt->rt_src));
969 } 959 }
970 } 960 }
@@ -983,7 +973,7 @@ static void icmp_discard(struct sk_buff *skb)
983int icmp_rcv(struct sk_buff *skb) 973int icmp_rcv(struct sk_buff *skb)
984{ 974{
985 struct icmphdr *icmph; 975 struct icmphdr *icmph;
986 struct rtable *rt = (struct rtable *)skb->dst; 976 struct rtable *rt = skb->rtable;
987 977
988 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { 978 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
989 int nh; 979 int nh;
@@ -1038,6 +1028,9 @@ int icmp_rcv(struct sk_buff *skb)
1038 */ 1028 */
1039 1029
1040 if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { 1030 if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
1031 struct net *net;
1032
1033 net = dev_net(rt->u.dst.dev);
1041 /* 1034 /*
1042 * RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be 1035 * RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be
1043 * silently ignored (we let user decide with a sysctl). 1036 * silently ignored (we let user decide with a sysctl).
@@ -1046,7 +1039,7 @@ int icmp_rcv(struct sk_buff *skb)
1046 */ 1039 */
1047 if ((icmph->type == ICMP_ECHO || 1040 if ((icmph->type == ICMP_ECHO ||
1048 icmph->type == ICMP_TIMESTAMP) && 1041 icmph->type == ICMP_TIMESTAMP) &&
1049 sysctl_icmp_echo_ignore_broadcasts) { 1042 net->ipv4.sysctl_icmp_echo_ignore_broadcasts) {
1050 goto error; 1043 goto error;
1051 } 1044 }
1052 if (icmph->type != ICMP_ECHO && 1045 if (icmph->type != ICMP_ECHO &&
@@ -1141,38 +1134,84 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
1141 }, 1134 },
1142}; 1135};
1143 1136
1144void __init icmp_init(struct net_proto_family *ops) 1137static void __net_exit icmp_sk_exit(struct net *net)
1145{ 1138{
1146 struct inet_sock *inet;
1147 int i; 1139 int i;
1148 1140
1149 for_each_possible_cpu(i) { 1141 for_each_possible_cpu(i)
1150 int err; 1142 inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]);
1143 kfree(net->ipv4.icmp_sk);
1144 net->ipv4.icmp_sk = NULL;
1145}
1146
1147int __net_init icmp_sk_init(struct net *net)
1148{
1149 int i, err;
1151 1150
1152 err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP, 1151 net->ipv4.icmp_sk =
1153 &per_cpu(__icmp_socket, i)); 1152 kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
1153 if (net->ipv4.icmp_sk == NULL)
1154 return -ENOMEM;
1154 1155
1156 for_each_possible_cpu(i) {
1157 struct sock *sk;
1158
1159 err = inet_ctl_sock_create(&sk, PF_INET,
1160 SOCK_RAW, IPPROTO_ICMP, net);
1155 if (err < 0) 1161 if (err < 0)
1156 panic("Failed to create the ICMP control socket.\n"); 1162 goto fail;
1157 1163
1158 per_cpu(__icmp_socket, i)->sk->sk_allocation = GFP_ATOMIC; 1164 net->ipv4.icmp_sk[i] = sk;
1159 1165
1160 /* Enough space for 2 64K ICMP packets, including 1166 /* Enough space for 2 64K ICMP packets, including
1161 * sk_buff struct overhead. 1167 * sk_buff struct overhead.
1162 */ 1168 */
1163 per_cpu(__icmp_socket, i)->sk->sk_sndbuf = 1169 sk->sk_sndbuf =
1164 (2 * ((64 * 1024) + sizeof(struct sk_buff))); 1170 (2 * ((64 * 1024) + sizeof(struct sk_buff)));
1165 1171
1166 inet = inet_sk(per_cpu(__icmp_socket, i)->sk); 1172 inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT;
1167 inet->uc_ttl = -1;
1168 inet->pmtudisc = IP_PMTUDISC_DONT;
1169
1170 /* Unhash it so that IP input processing does not even
1171 * see it, we do not wish this socket to see incoming
1172 * packets.
1173 */
1174 per_cpu(__icmp_socket, i)->sk->sk_prot->unhash(per_cpu(__icmp_socket, i)->sk);
1175 } 1173 }
1174
1175 /* Control parameters for ECHO replies. */
1176 net->ipv4.sysctl_icmp_echo_ignore_all = 0;
1177 net->ipv4.sysctl_icmp_echo_ignore_broadcasts = 1;
1178
1179 /* Control parameter - ignore bogus broadcast responses? */
1180 net->ipv4.sysctl_icmp_ignore_bogus_error_responses = 1;
1181
1182 /*
1183 * Configurable global rate limit.
1184 *
1185 * ratelimit defines tokens/packet consumed for dst->rate_token
1186 * bucket ratemask defines which icmp types are ratelimited by
1187 * setting it's bit position.
1188 *
1189 * default:
1190 * dest unreachable (3), source quench (4),
1191 * time exceeded (11), parameter problem (12)
1192 */
1193
1194 net->ipv4.sysctl_icmp_ratelimit = 1 * HZ;
1195 net->ipv4.sysctl_icmp_ratemask = 0x1818;
1196 net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0;
1197
1198 return 0;
1199
1200fail:
1201 for_each_possible_cpu(i)
1202 inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]);
1203 kfree(net->ipv4.icmp_sk);
1204 return err;
1205}
1206
1207static struct pernet_operations __net_initdata icmp_sk_ops = {
1208 .init = icmp_sk_init,
1209 .exit = icmp_sk_exit,
1210};
1211
1212int __init icmp_init(void)
1213{
1214 return register_pernet_device(&icmp_sk_ops);
1176} 1215}
1177 1216
1178EXPORT_SYMBOL(icmp_err_convert); 1217EXPORT_SYMBOL(icmp_err_convert);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 732cd07e6071..6250f4239b61 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -130,12 +130,12 @@
130 */ 130 */
131 131
132#define IGMP_V1_SEEN(in_dev) \ 132#define IGMP_V1_SEEN(in_dev) \
133 (IPV4_DEVCONF_ALL(in_dev->dev->nd_net, FORCE_IGMP_VERSION) == 1 || \ 133 (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \
134 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1 || \ 134 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1 || \
135 ((in_dev)->mr_v1_seen && \ 135 ((in_dev)->mr_v1_seen && \
136 time_before(jiffies, (in_dev)->mr_v1_seen))) 136 time_before(jiffies, (in_dev)->mr_v1_seen)))
137#define IGMP_V2_SEEN(in_dev) \ 137#define IGMP_V2_SEEN(in_dev) \
138 (IPV4_DEVCONF_ALL(in_dev->dev->nd_net, FORCE_IGMP_VERSION) == 2 || \ 138 (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \
139 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2 || \ 139 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2 || \
140 ((in_dev)->mr_v2_seen && \ 140 ((in_dev)->mr_v2_seen && \
141 time_before(jiffies, (in_dev)->mr_v2_seen))) 141 time_before(jiffies, (in_dev)->mr_v2_seen)))
@@ -948,7 +948,7 @@ int igmp_rcv(struct sk_buff *skb)
948 case IGMPV2_HOST_MEMBERSHIP_REPORT: 948 case IGMPV2_HOST_MEMBERSHIP_REPORT:
949 case IGMPV3_HOST_MEMBERSHIP_REPORT: 949 case IGMPV3_HOST_MEMBERSHIP_REPORT:
950 /* Is it our report looped back? */ 950 /* Is it our report looped back? */
951 if (((struct rtable*)skb->dst)->fl.iif == 0) 951 if (skb->rtable->fl.iif == 0)
952 break; 952 break;
953 /* don't rely on MC router hearing unicast reports */ 953 /* don't rely on MC router hearing unicast reports */
954 if (skb->pkt_type == PACKET_MULTICAST || 954 if (skb->pkt_type == PACKET_MULTICAST ||
@@ -1198,6 +1198,9 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1198 1198
1199 ASSERT_RTNL(); 1199 ASSERT_RTNL();
1200 1200
1201 if (dev_net(in_dev->dev) != &init_net)
1202 return;
1203
1201 for (im=in_dev->mc_list; im; im=im->next) { 1204 for (im=in_dev->mc_list; im; im=im->next) {
1202 if (im->multiaddr == addr) { 1205 if (im->multiaddr == addr) {
1203 im->users++; 1206 im->users++;
@@ -1277,6 +1280,9 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
1277 1280
1278 ASSERT_RTNL(); 1281 ASSERT_RTNL();
1279 1282
1283 if (dev_net(in_dev->dev) != &init_net)
1284 return;
1285
1280 for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) { 1286 for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
1281 if (i->multiaddr==addr) { 1287 if (i->multiaddr==addr) {
1282 if (--i->users == 0) { 1288 if (--i->users == 0) {
@@ -1304,6 +1310,9 @@ void ip_mc_down(struct in_device *in_dev)
1304 1310
1305 ASSERT_RTNL(); 1311 ASSERT_RTNL();
1306 1312
1313 if (dev_net(in_dev->dev) != &init_net)
1314 return;
1315
1307 for (i=in_dev->mc_list; i; i=i->next) 1316 for (i=in_dev->mc_list; i; i=i->next)
1308 igmp_group_dropped(i); 1317 igmp_group_dropped(i);
1309 1318
@@ -1324,6 +1333,9 @@ void ip_mc_init_dev(struct in_device *in_dev)
1324{ 1333{
1325 ASSERT_RTNL(); 1334 ASSERT_RTNL();
1326 1335
1336 if (dev_net(in_dev->dev) != &init_net)
1337 return;
1338
1327 in_dev->mc_tomb = NULL; 1339 in_dev->mc_tomb = NULL;
1328#ifdef CONFIG_IP_MULTICAST 1340#ifdef CONFIG_IP_MULTICAST
1329 in_dev->mr_gq_running = 0; 1341 in_dev->mr_gq_running = 0;
@@ -1347,6 +1359,9 @@ void ip_mc_up(struct in_device *in_dev)
1347 1359
1348 ASSERT_RTNL(); 1360 ASSERT_RTNL();
1349 1361
1362 if (dev_net(in_dev->dev) != &init_net)
1363 return;
1364
1350 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); 1365 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
1351 1366
1352 for (i=in_dev->mc_list; i; i=i->next) 1367 for (i=in_dev->mc_list; i; i=i->next)
@@ -1363,6 +1378,9 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
1363 1378
1364 ASSERT_RTNL(); 1379 ASSERT_RTNL();
1365 1380
1381 if (dev_net(in_dev->dev) != &init_net)
1382 return;
1383
1366 /* Deactivate timers */ 1384 /* Deactivate timers */
1367 ip_mc_down(in_dev); 1385 ip_mc_down(in_dev);
1368 1386
@@ -1744,6 +1762,9 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
1744 if (!ipv4_is_multicast(addr)) 1762 if (!ipv4_is_multicast(addr))
1745 return -EINVAL; 1763 return -EINVAL;
1746 1764
1765 if (sock_net(sk) != &init_net)
1766 return -EPROTONOSUPPORT;
1767
1747 rtnl_lock(); 1768 rtnl_lock();
1748 1769
1749 in_dev = ip_mc_find_dev(imr); 1770 in_dev = ip_mc_find_dev(imr);
@@ -1812,6 +1833,9 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1812 u32 ifindex; 1833 u32 ifindex;
1813 int ret = -EADDRNOTAVAIL; 1834 int ret = -EADDRNOTAVAIL;
1814 1835
1836 if (sock_net(sk) != &init_net)
1837 return -EPROTONOSUPPORT;
1838
1815 rtnl_lock(); 1839 rtnl_lock();
1816 in_dev = ip_mc_find_dev(imr); 1840 in_dev = ip_mc_find_dev(imr);
1817 ifindex = imr->imr_ifindex; 1841 ifindex = imr->imr_ifindex;
@@ -1857,6 +1881,9 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1857 if (!ipv4_is_multicast(addr)) 1881 if (!ipv4_is_multicast(addr))
1858 return -EINVAL; 1882 return -EINVAL;
1859 1883
1884 if (sock_net(sk) != &init_net)
1885 return -EPROTONOSUPPORT;
1886
1860 rtnl_lock(); 1887 rtnl_lock();
1861 1888
1862 imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr; 1889 imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
@@ -1990,6 +2017,9 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
1990 msf->imsf_fmode != MCAST_EXCLUDE) 2017 msf->imsf_fmode != MCAST_EXCLUDE)
1991 return -EINVAL; 2018 return -EINVAL;
1992 2019
2020 if (sock_net(sk) != &init_net)
2021 return -EPROTONOSUPPORT;
2022
1993 rtnl_lock(); 2023 rtnl_lock();
1994 2024
1995 imr.imr_multiaddr.s_addr = msf->imsf_multiaddr; 2025 imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
@@ -2070,6 +2100,9 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
2070 if (!ipv4_is_multicast(addr)) 2100 if (!ipv4_is_multicast(addr))
2071 return -EINVAL; 2101 return -EINVAL;
2072 2102
2103 if (sock_net(sk) != &init_net)
2104 return -EPROTONOSUPPORT;
2105
2073 rtnl_lock(); 2106 rtnl_lock();
2074 2107
2075 imr.imr_multiaddr.s_addr = msf->imsf_multiaddr; 2108 imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
@@ -2132,6 +2165,9 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
2132 if (!ipv4_is_multicast(addr)) 2165 if (!ipv4_is_multicast(addr))
2133 return -EINVAL; 2166 return -EINVAL;
2134 2167
2168 if (sock_net(sk) != &init_net)
2169 return -EPROTONOSUPPORT;
2170
2135 rtnl_lock(); 2171 rtnl_lock();
2136 2172
2137 err = -EADDRNOTAVAIL; 2173 err = -EADDRNOTAVAIL;
@@ -2216,6 +2252,9 @@ void ip_mc_drop_socket(struct sock *sk)
2216 if (inet->mc_list == NULL) 2252 if (inet->mc_list == NULL)
2217 return; 2253 return;
2218 2254
2255 if (sock_net(sk) != &init_net)
2256 return;
2257
2219 rtnl_lock(); 2258 rtnl_lock();
2220 while ((iml = inet->mc_list) != NULL) { 2259 while ((iml = inet->mc_list) != NULL) {
2221 struct in_device *in_dev; 2260 struct in_device *in_dev;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index b189278c7bc1..828ea211ff21 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -55,6 +55,13 @@ int inet_csk_bind_conflict(const struct sock *sk,
55 struct hlist_node *node; 55 struct hlist_node *node;
56 int reuse = sk->sk_reuse; 56 int reuse = sk->sk_reuse;
57 57
58 /*
59 * Unlike other sk lookup places we do not check
60 * for sk_net here, since _all_ the socks listed
61 * in tb->owners list belong to the same net - the
62 * one this bucket belongs to.
63 */
64
58 sk_for_each_bound(sk2, node, &tb->owners) { 65 sk_for_each_bound(sk2, node, &tb->owners) {
59 if (sk != sk2 && 66 if (sk != sk2 &&
60 !inet_v6_ipv6only(sk2) && 67 !inet_v6_ipv6only(sk2) &&
@@ -80,12 +87,12 @@ EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
80 */ 87 */
81int inet_csk_get_port(struct sock *sk, unsigned short snum) 88int inet_csk_get_port(struct sock *sk, unsigned short snum)
82{ 89{
83 struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo; 90 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
84 struct inet_bind_hashbucket *head; 91 struct inet_bind_hashbucket *head;
85 struct hlist_node *node; 92 struct hlist_node *node;
86 struct inet_bind_bucket *tb; 93 struct inet_bind_bucket *tb;
87 int ret; 94 int ret;
88 struct net *net = sk->sk_net; 95 struct net *net = sock_net(sk);
89 96
90 local_bh_disable(); 97 local_bh_disable();
91 if (!snum) { 98 if (!snum) {
@@ -133,8 +140,6 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
133 goto tb_not_found; 140 goto tb_not_found;
134tb_found: 141tb_found:
135 if (!hlist_empty(&tb->owners)) { 142 if (!hlist_empty(&tb->owners)) {
136 if (sk->sk_reuse > 1)
137 goto success;
138 if (tb->fastreuse > 0 && 143 if (tb->fastreuse > 0 &&
139 sk->sk_reuse && sk->sk_state != TCP_LISTEN) { 144 sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
140 goto success; 145 goto success;
@@ -333,7 +338,7 @@ struct dst_entry* inet_csk_route_req(struct sock *sk,
333 .dport = ireq->rmt_port } } }; 338 .dport = ireq->rmt_port } } };
334 339
335 security_req_classify_flow(req, &fl); 340 security_req_classify_flow(req, &fl);
336 if (ip_route_output_flow(&init_net, &rt, &fl, sk, 0)) { 341 if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0)) {
337 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); 342 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
338 return NULL; 343 return NULL;
339 } 344 }
@@ -414,8 +419,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
414 struct inet_connection_sock *icsk = inet_csk(parent); 419 struct inet_connection_sock *icsk = inet_csk(parent);
415 struct request_sock_queue *queue = &icsk->icsk_accept_queue; 420 struct request_sock_queue *queue = &icsk->icsk_accept_queue;
416 struct listen_sock *lopt = queue->listen_opt; 421 struct listen_sock *lopt = queue->listen_opt;
417 int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries; 422 int thresh = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
418 int thresh = max_retries;
419 unsigned long now = jiffies; 423 unsigned long now = jiffies;
420 struct request_sock **reqp, *req; 424 struct request_sock **reqp, *req;
421 int i, budget; 425 int i, budget;
@@ -451,9 +455,6 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
451 } 455 }
452 } 456 }
453 457
454 if (queue->rskq_defer_accept)
455 max_retries = queue->rskq_defer_accept;
456
457 budget = 2 * (lopt->nr_table_entries / (timeout / interval)); 458 budget = 2 * (lopt->nr_table_entries / (timeout / interval));
458 i = lopt->clock_hand; 459 i = lopt->clock_hand;
459 460
@@ -461,9 +462,8 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
461 reqp=&lopt->syn_table[i]; 462 reqp=&lopt->syn_table[i];
462 while ((req = *reqp) != NULL) { 463 while ((req = *reqp) != NULL) {
463 if (time_after_eq(now, req->expires)) { 464 if (time_after_eq(now, req->expires)) {
464 if ((req->retrans < thresh || 465 if (req->retrans < thresh &&
465 (inet_rsk(req)->acked && req->retrans < max_retries)) 466 !req->rsk_ops->rtx_syn_ack(parent, req)) {
466 && !req->rsk_ops->rtx_syn_ack(parent, req, NULL)) {
467 unsigned long timeo; 467 unsigned long timeo;
468 468
469 if (req->retrans++ == 0) 469 if (req->retrans++ == 0)
@@ -656,25 +656,6 @@ void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
656 656
657EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr); 657EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
658 658
659int inet_csk_ctl_sock_create(struct socket **sock, unsigned short family,
660 unsigned short type, unsigned char protocol)
661{
662 int rc = sock_create_kern(family, type, protocol, sock);
663
664 if (rc == 0) {
665 (*sock)->sk->sk_allocation = GFP_ATOMIC;
666 inet_sk((*sock)->sk)->uc_ttl = -1;
667 /*
668 * Unhash it so that IP input processing does not even see it,
669 * we do not wish this socket to see incoming packets.
670 */
671 (*sock)->sk->sk_prot->unhash((*sock)->sk);
672 }
673 return rc;
674}
675
676EXPORT_SYMBOL_GPL(inet_csk_ctl_sock_create);
677
678#ifdef CONFIG_COMPAT 659#ifdef CONFIG_COMPAT
679int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, 660int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
680 char __user *optval, int __user *optlen) 661 char __user *optval, int __user *optlen)
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index a0a3c78cb5e0..4ed429bd5951 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -107,10 +107,10 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
107 if (del_timer(&fq->timer)) 107 if (del_timer(&fq->timer))
108 atomic_dec(&fq->refcnt); 108 atomic_dec(&fq->refcnt);
109 109
110 if (!(fq->last_in & COMPLETE)) { 110 if (!(fq->last_in & INET_FRAG_COMPLETE)) {
111 fq_unlink(fq, f); 111 fq_unlink(fq, f);
112 atomic_dec(&fq->refcnt); 112 atomic_dec(&fq->refcnt);
113 fq->last_in |= COMPLETE; 113 fq->last_in |= INET_FRAG_COMPLETE;
114 } 114 }
115} 115}
116 116
@@ -134,7 +134,7 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
134 struct sk_buff *fp; 134 struct sk_buff *fp;
135 struct netns_frags *nf; 135 struct netns_frags *nf;
136 136
137 BUG_TRAP(q->last_in & COMPLETE); 137 BUG_TRAP(q->last_in & INET_FRAG_COMPLETE);
138 BUG_TRAP(del_timer(&q->timer) == 0); 138 BUG_TRAP(del_timer(&q->timer) == 0);
139 139
140 /* Release all fragment data. */ 140 /* Release all fragment data. */
@@ -177,7 +177,7 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f)
177 read_unlock(&f->lock); 177 read_unlock(&f->lock);
178 178
179 spin_lock(&q->lock); 179 spin_lock(&q->lock);
180 if (!(q->last_in & COMPLETE)) 180 if (!(q->last_in & INET_FRAG_COMPLETE))
181 inet_frag_kill(q, f); 181 inet_frag_kill(q, f);
182 spin_unlock(&q->lock); 182 spin_unlock(&q->lock);
183 183
@@ -209,7 +209,7 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
209 if (qp->net == nf && f->match(qp, arg)) { 209 if (qp->net == nf && f->match(qp, arg)) {
210 atomic_inc(&qp->refcnt); 210 atomic_inc(&qp->refcnt);
211 write_unlock(&f->lock); 211 write_unlock(&f->lock);
212 qp_in->last_in |= COMPLETE; 212 qp_in->last_in |= INET_FRAG_COMPLETE;
213 inet_frag_put(qp_in, f); 213 inet_frag_put(qp_in, f);
214 return qp; 214 return qp;
215 } 215 }
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 1aba606f6bbb..2023d37b2708 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -35,7 +35,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
35 struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); 35 struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
36 36
37 if (tb != NULL) { 37 if (tb != NULL) {
38 tb->ib_net = net; 38 tb->ib_net = hold_net(net);
39 tb->port = snum; 39 tb->port = snum;
40 tb->fastreuse = 0; 40 tb->fastreuse = 0;
41 INIT_HLIST_HEAD(&tb->owners); 41 INIT_HLIST_HEAD(&tb->owners);
@@ -51,6 +51,7 @@ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket
51{ 51{
52 if (hlist_empty(&tb->owners)) { 52 if (hlist_empty(&tb->owners)) {
53 __hlist_del(&tb->node); 53 __hlist_del(&tb->node);
54 release_net(tb->ib_net);
54 kmem_cache_free(cachep, tb); 55 kmem_cache_free(cachep, tb);
55 } 56 }
56} 57}
@@ -68,7 +69,7 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
68 */ 69 */
69static void __inet_put_port(struct sock *sk) 70static void __inet_put_port(struct sock *sk)
70{ 71{
71 struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo; 72 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
72 const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size); 73 const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size);
73 struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; 74 struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
74 struct inet_bind_bucket *tb; 75 struct inet_bind_bucket *tb;
@@ -91,6 +92,22 @@ void inet_put_port(struct sock *sk)
91 92
92EXPORT_SYMBOL(inet_put_port); 93EXPORT_SYMBOL(inet_put_port);
93 94
95void __inet_inherit_port(struct sock *sk, struct sock *child)
96{
97 struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
98 const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size);
99 struct inet_bind_hashbucket *head = &table->bhash[bhash];
100 struct inet_bind_bucket *tb;
101
102 spin_lock(&head->lock);
103 tb = inet_csk(sk)->icsk_bind_hash;
104 sk_add_bind_node(child, &tb->owners);
105 inet_csk(child)->icsk_bind_hash = tb;
106 spin_unlock(&head->lock);
107}
108
109EXPORT_SYMBOL_GPL(__inet_inherit_port);
110
94/* 111/*
95 * This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP. 112 * This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
96 * Look, when several writers sleep and reader wakes them up, all but one 113 * Look, when several writers sleep and reader wakes them up, all but one
@@ -139,7 +156,7 @@ static struct sock *inet_lookup_listener_slow(struct net *net,
139 sk_for_each(sk, node, head) { 156 sk_for_each(sk, node, head) {
140 const struct inet_sock *inet = inet_sk(sk); 157 const struct inet_sock *inet = inet_sk(sk);
141 158
142 if (sk->sk_net == net && inet->num == hnum && 159 if (net_eq(sock_net(sk), net) && inet->num == hnum &&
143 !ipv6_only_sock(sk)) { 160 !ipv6_only_sock(sk)) {
144 const __be32 rcv_saddr = inet->rcv_saddr; 161 const __be32 rcv_saddr = inet->rcv_saddr;
145 int score = sk->sk_family == PF_INET ? 1 : 0; 162 int score = sk->sk_family == PF_INET ? 1 : 0;
@@ -182,7 +199,7 @@ struct sock *__inet_lookup_listener(struct net *net,
182 if (inet->num == hnum && !sk->sk_node.next && 199 if (inet->num == hnum && !sk->sk_node.next &&
183 (!inet->rcv_saddr || inet->rcv_saddr == daddr) && 200 (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
184 (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) && 201 (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
185 !sk->sk_bound_dev_if && sk->sk_net == net) 202 !sk->sk_bound_dev_if && net_eq(sock_net(sk), net))
186 goto sherry_cache; 203 goto sherry_cache;
187 sk = inet_lookup_listener_slow(net, head, daddr, hnum, dif); 204 sk = inet_lookup_listener_slow(net, head, daddr, hnum, dif);
188 } 205 }
@@ -254,7 +271,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
254 struct sock *sk2; 271 struct sock *sk2;
255 const struct hlist_node *node; 272 const struct hlist_node *node;
256 struct inet_timewait_sock *tw; 273 struct inet_timewait_sock *tw;
257 struct net *net = sk->sk_net; 274 struct net *net = sock_net(sk);
258 275
259 prefetch(head->chain.first); 276 prefetch(head->chain.first);
260 write_lock(lock); 277 write_lock(lock);
@@ -288,7 +305,7 @@ unique:
288 sk->sk_hash = hash; 305 sk->sk_hash = hash;
289 BUG_TRAP(sk_unhashed(sk)); 306 BUG_TRAP(sk_unhashed(sk));
290 __sk_add_node(sk, &head->chain); 307 __sk_add_node(sk, &head->chain);
291 sock_prot_inuse_add(sk->sk_prot, 1); 308 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
292 write_unlock(lock); 309 write_unlock(lock);
293 310
294 if (twp) { 311 if (twp) {
@@ -318,7 +335,7 @@ static inline u32 inet_sk_port_offset(const struct sock *sk)
318 335
319void __inet_hash_nolisten(struct sock *sk) 336void __inet_hash_nolisten(struct sock *sk)
320{ 337{
321 struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo; 338 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
322 struct hlist_head *list; 339 struct hlist_head *list;
323 rwlock_t *lock; 340 rwlock_t *lock;
324 struct inet_ehash_bucket *head; 341 struct inet_ehash_bucket *head;
@@ -332,14 +349,14 @@ void __inet_hash_nolisten(struct sock *sk)
332 349
333 write_lock(lock); 350 write_lock(lock);
334 __sk_add_node(sk, list); 351 __sk_add_node(sk, list);
335 sock_prot_inuse_add(sk->sk_prot, 1); 352 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
336 write_unlock(lock); 353 write_unlock(lock);
337} 354}
338EXPORT_SYMBOL_GPL(__inet_hash_nolisten); 355EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
339 356
340static void __inet_hash(struct sock *sk) 357static void __inet_hash(struct sock *sk)
341{ 358{
342 struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo; 359 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
343 struct hlist_head *list; 360 struct hlist_head *list;
344 rwlock_t *lock; 361 rwlock_t *lock;
345 362
@@ -354,7 +371,7 @@ static void __inet_hash(struct sock *sk)
354 371
355 inet_listen_wlock(hashinfo); 372 inet_listen_wlock(hashinfo);
356 __sk_add_node(sk, list); 373 __sk_add_node(sk, list);
357 sock_prot_inuse_add(sk->sk_prot, 1); 374 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
358 write_unlock(lock); 375 write_unlock(lock);
359 wake_up(&hashinfo->lhash_wait); 376 wake_up(&hashinfo->lhash_wait);
360} 377}
@@ -372,7 +389,7 @@ EXPORT_SYMBOL_GPL(inet_hash);
372void inet_unhash(struct sock *sk) 389void inet_unhash(struct sock *sk)
373{ 390{
374 rwlock_t *lock; 391 rwlock_t *lock;
375 struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo; 392 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
376 393
377 if (sk_unhashed(sk)) 394 if (sk_unhashed(sk))
378 goto out; 395 goto out;
@@ -387,7 +404,7 @@ void inet_unhash(struct sock *sk)
387 } 404 }
388 405
389 if (__sk_del_node_init(sk)) 406 if (__sk_del_node_init(sk))
390 sock_prot_inuse_add(sk->sk_prot, -1); 407 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
391 write_unlock_bh(lock); 408 write_unlock_bh(lock);
392out: 409out:
393 if (sk->sk_state == TCP_LISTEN) 410 if (sk->sk_state == TCP_LISTEN)
@@ -406,7 +423,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
406 struct inet_bind_hashbucket *head; 423 struct inet_bind_hashbucket *head;
407 struct inet_bind_bucket *tb; 424 struct inet_bind_bucket *tb;
408 int ret; 425 int ret;
409 struct net *net = sk->sk_net; 426 struct net *net = sock_net(sk);
410 427
411 if (!snum) { 428 if (!snum) {
412 int i, remaining, low, high, port; 429 int i, remaining, low, high, port;
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 717c411a5c6b..ce16e9ac24c1 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -57,6 +57,7 @@ void inet_twsk_put(struct inet_timewait_sock *tw)
57 printk(KERN_DEBUG "%s timewait_sock %p released\n", 57 printk(KERN_DEBUG "%s timewait_sock %p released\n",
58 tw->tw_prot->name, tw); 58 tw->tw_prot->name, tw);
59#endif 59#endif
60 release_net(twsk_net(tw));
60 kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw); 61 kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
61 module_put(owner); 62 module_put(owner);
62 } 63 }
@@ -91,7 +92,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
91 92
92 /* Step 2: Remove SK from established hash. */ 93 /* Step 2: Remove SK from established hash. */
93 if (__sk_del_node_init(sk)) 94 if (__sk_del_node_init(sk))
94 sock_prot_inuse_add(sk->sk_prot, -1); 95 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
95 96
96 /* Step 3: Hash TW into TIMEWAIT chain. */ 97 /* Step 3: Hash TW into TIMEWAIT chain. */
97 inet_twsk_add_node(tw, &ehead->twchain); 98 inet_twsk_add_node(tw, &ehead->twchain);
@@ -124,7 +125,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
124 tw->tw_hash = sk->sk_hash; 125 tw->tw_hash = sk->sk_hash;
125 tw->tw_ipv6only = 0; 126 tw->tw_ipv6only = 0;
126 tw->tw_prot = sk->sk_prot_creator; 127 tw->tw_prot = sk->sk_prot_creator;
127 tw->tw_net = sk->sk_net; 128 twsk_net_set(tw, hold_net(sock_net(sk)));
128 atomic_set(&tw->tw_refcnt, 1); 129 atomic_set(&tw->tw_refcnt, 1);
129 inet_twsk_dead_node_init(tw); 130 inet_twsk_dead_node_init(tw);
130 __module_get(tw->tw_prot->owner); 131 __module_get(tw->tw_prot->owner);
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index a4506c8cfef0..4813c39b438b 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -80,7 +80,7 @@ int ip_forward(struct sk_buff *skb)
80 if (!xfrm4_route_forward(skb)) 80 if (!xfrm4_route_forward(skb))
81 goto drop; 81 goto drop;
82 82
83 rt = (struct rtable*)skb->dst; 83 rt = skb->rtable;
84 84
85 if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway) 85 if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
86 goto sr_failed; 86 goto sr_failed;
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 3b2e5adca838..cd6ce6ac6358 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -194,7 +194,7 @@ static void ip_expire(unsigned long arg)
194 194
195 spin_lock(&qp->q.lock); 195 spin_lock(&qp->q.lock);
196 196
197 if (qp->q.last_in & COMPLETE) 197 if (qp->q.last_in & INET_FRAG_COMPLETE)
198 goto out; 198 goto out;
199 199
200 ipq_kill(qp); 200 ipq_kill(qp);
@@ -202,10 +202,13 @@ static void ip_expire(unsigned long arg)
202 IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT); 202 IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
203 IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); 203 IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
204 204
205 if ((qp->q.last_in&FIRST_IN) && qp->q.fragments != NULL) { 205 if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
206 struct sk_buff *head = qp->q.fragments; 206 struct sk_buff *head = qp->q.fragments;
207 struct net *net;
208
209 net = container_of(qp->q.net, struct net, ipv4.frags);
207 /* Send an ICMP "Fragment Reassembly Timeout" message. */ 210 /* Send an ICMP "Fragment Reassembly Timeout" message. */
208 if ((head->dev = dev_get_by_index(&init_net, qp->iif)) != NULL) { 211 if ((head->dev = dev_get_by_index(net, qp->iif)) != NULL) {
209 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); 212 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
210 dev_put(head->dev); 213 dev_put(head->dev);
211 } 214 }
@@ -298,7 +301,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
298 int ihl, end; 301 int ihl, end;
299 int err = -ENOENT; 302 int err = -ENOENT;
300 303
301 if (qp->q.last_in & COMPLETE) 304 if (qp->q.last_in & INET_FRAG_COMPLETE)
302 goto err; 305 goto err;
303 306
304 if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) && 307 if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
@@ -324,9 +327,9 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
324 * or have different end, the segment is corrrupted. 327 * or have different end, the segment is corrrupted.
325 */ 328 */
326 if (end < qp->q.len || 329 if (end < qp->q.len ||
327 ((qp->q.last_in & LAST_IN) && end != qp->q.len)) 330 ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
328 goto err; 331 goto err;
329 qp->q.last_in |= LAST_IN; 332 qp->q.last_in |= INET_FRAG_LAST_IN;
330 qp->q.len = end; 333 qp->q.len = end;
331 } else { 334 } else {
332 if (end&7) { 335 if (end&7) {
@@ -336,7 +339,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
336 } 339 }
337 if (end > qp->q.len) { 340 if (end > qp->q.len) {
338 /* Some bits beyond end -> corruption. */ 341 /* Some bits beyond end -> corruption. */
339 if (qp->q.last_in & LAST_IN) 342 if (qp->q.last_in & INET_FRAG_LAST_IN)
340 goto err; 343 goto err;
341 qp->q.len = end; 344 qp->q.len = end;
342 } 345 }
@@ -435,9 +438,10 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
435 qp->q.meat += skb->len; 438 qp->q.meat += skb->len;
436 atomic_add(skb->truesize, &qp->q.net->mem); 439 atomic_add(skb->truesize, &qp->q.net->mem);
437 if (offset == 0) 440 if (offset == 0)
438 qp->q.last_in |= FIRST_IN; 441 qp->q.last_in |= INET_FRAG_FIRST_IN;
439 442
440 if (qp->q.last_in == (FIRST_IN | LAST_IN) && qp->q.meat == qp->q.len) 443 if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
444 qp->q.meat == qp->q.len)
441 return ip_frag_reasm(qp, prev, dev); 445 return ip_frag_reasm(qp, prev, dev);
442 446
443 write_lock(&ip4_frags.lock); 447 write_lock(&ip4_frags.lock);
@@ -553,7 +557,7 @@ out_nomem:
553out_oversize: 557out_oversize:
554 if (net_ratelimit()) 558 if (net_ratelimit())
555 printk(KERN_INFO 559 printk(KERN_INFO
556 "Oversized IP packet from %d.%d.%d.%d.\n", 560 "Oversized IP packet from " NIPQUAD_FMT ".\n",
557 NIPQUAD(qp->saddr)); 561 NIPQUAD(qp->saddr));
558out_fail: 562out_fail:
559 IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); 563 IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
@@ -568,7 +572,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
568 572
569 IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS); 573 IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
570 574
571 net = skb->dev ? skb->dev->nd_net : skb->dst->dev->nd_net; 575 net = skb->dev ? dev_net(skb->dev) : dev_net(skb->dst->dev);
572 /* Start by cleaning up the memory. */ 576 /* Start by cleaning up the memory. */
573 if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh) 577 if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
574 ip_evictor(net); 578 ip_evictor(net);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index e7821ba7a9a0..2ada033406de 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -39,6 +39,8 @@
39#include <net/dsfield.h> 39#include <net/dsfield.h>
40#include <net/inet_ecn.h> 40#include <net/inet_ecn.h>
41#include <net/xfrm.h> 41#include <net/xfrm.h>
42#include <net/net_namespace.h>
43#include <net/netns/generic.h>
42 44
43#ifdef CONFIG_IPV6 45#ifdef CONFIG_IPV6
44#include <net/ipv6.h> 46#include <net/ipv6.h>
@@ -122,7 +124,14 @@ static void ipgre_tunnel_setup(struct net_device *dev);
122 124
123static int ipgre_fb_tunnel_init(struct net_device *dev); 125static int ipgre_fb_tunnel_init(struct net_device *dev);
124 126
125static struct net_device *ipgre_fb_tunnel_dev; 127#define HASH_SIZE 16
128
129static int ipgre_net_id;
130struct ipgre_net {
131 struct ip_tunnel *tunnels[4][HASH_SIZE];
132
133 struct net_device *fb_tunnel_dev;
134};
126 135
127/* Tunnel hash table */ 136/* Tunnel hash table */
128 137
@@ -142,39 +151,38 @@ static struct net_device *ipgre_fb_tunnel_dev;
142 will match fallback tunnel. 151 will match fallback tunnel.
143 */ 152 */
144 153
145#define HASH_SIZE 16
146#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) 154#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
147 155
148static struct ip_tunnel *tunnels[4][HASH_SIZE]; 156#define tunnels_r_l tunnels[3]
149 157#define tunnels_r tunnels[2]
150#define tunnels_r_l (tunnels[3]) 158#define tunnels_l tunnels[1]
151#define tunnels_r (tunnels[2]) 159#define tunnels_wc tunnels[0]
152#define tunnels_l (tunnels[1])
153#define tunnels_wc (tunnels[0])
154 160
155static DEFINE_RWLOCK(ipgre_lock); 161static DEFINE_RWLOCK(ipgre_lock);
156 162
157/* Given src, dst and key, find appropriate for input tunnel. */ 163/* Given src, dst and key, find appropriate for input tunnel. */
158 164
159static struct ip_tunnel * ipgre_tunnel_lookup(__be32 remote, __be32 local, __be32 key) 165static struct ip_tunnel * ipgre_tunnel_lookup(struct net *net,
166 __be32 remote, __be32 local, __be32 key)
160{ 167{
161 unsigned h0 = HASH(remote); 168 unsigned h0 = HASH(remote);
162 unsigned h1 = HASH(key); 169 unsigned h1 = HASH(key);
163 struct ip_tunnel *t; 170 struct ip_tunnel *t;
171 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
164 172
165 for (t = tunnels_r_l[h0^h1]; t; t = t->next) { 173 for (t = ign->tunnels_r_l[h0^h1]; t; t = t->next) {
166 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) { 174 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) {
167 if (t->parms.i_key == key && (t->dev->flags&IFF_UP)) 175 if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
168 return t; 176 return t;
169 } 177 }
170 } 178 }
171 for (t = tunnels_r[h0^h1]; t; t = t->next) { 179 for (t = ign->tunnels_r[h0^h1]; t; t = t->next) {
172 if (remote == t->parms.iph.daddr) { 180 if (remote == t->parms.iph.daddr) {
173 if (t->parms.i_key == key && (t->dev->flags&IFF_UP)) 181 if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
174 return t; 182 return t;
175 } 183 }
176 } 184 }
177 for (t = tunnels_l[h1]; t; t = t->next) { 185 for (t = ign->tunnels_l[h1]; t; t = t->next) {
178 if (local == t->parms.iph.saddr || 186 if (local == t->parms.iph.saddr ||
179 (local == t->parms.iph.daddr && 187 (local == t->parms.iph.daddr &&
180 ipv4_is_multicast(local))) { 188 ipv4_is_multicast(local))) {
@@ -182,17 +190,18 @@ static struct ip_tunnel * ipgre_tunnel_lookup(__be32 remote, __be32 local, __be3
182 return t; 190 return t;
183 } 191 }
184 } 192 }
185 for (t = tunnels_wc[h1]; t; t = t->next) { 193 for (t = ign->tunnels_wc[h1]; t; t = t->next) {
186 if (t->parms.i_key == key && (t->dev->flags&IFF_UP)) 194 if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
187 return t; 195 return t;
188 } 196 }
189 197
190 if (ipgre_fb_tunnel_dev->flags&IFF_UP) 198 if (ign->fb_tunnel_dev->flags&IFF_UP)
191 return netdev_priv(ipgre_fb_tunnel_dev); 199 return netdev_priv(ign->fb_tunnel_dev);
192 return NULL; 200 return NULL;
193} 201}
194 202
195static struct ip_tunnel **__ipgre_bucket(struct ip_tunnel_parm *parms) 203static struct ip_tunnel **__ipgre_bucket(struct ipgre_net *ign,
204 struct ip_tunnel_parm *parms)
196{ 205{
197 __be32 remote = parms->iph.daddr; 206 __be32 remote = parms->iph.daddr;
198 __be32 local = parms->iph.saddr; 207 __be32 local = parms->iph.saddr;
@@ -207,17 +216,18 @@ static struct ip_tunnel **__ipgre_bucket(struct ip_tunnel_parm *parms)
207 h ^= HASH(remote); 216 h ^= HASH(remote);
208 } 217 }
209 218
210 return &tunnels[prio][h]; 219 return &ign->tunnels[prio][h];
211} 220}
212 221
213static inline struct ip_tunnel **ipgre_bucket(struct ip_tunnel *t) 222static inline struct ip_tunnel **ipgre_bucket(struct ipgre_net *ign,
223 struct ip_tunnel *t)
214{ 224{
215 return __ipgre_bucket(&t->parms); 225 return __ipgre_bucket(ign, &t->parms);
216} 226}
217 227
218static void ipgre_tunnel_link(struct ip_tunnel *t) 228static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
219{ 229{
220 struct ip_tunnel **tp = ipgre_bucket(t); 230 struct ip_tunnel **tp = ipgre_bucket(ign, t);
221 231
222 t->next = *tp; 232 t->next = *tp;
223 write_lock_bh(&ipgre_lock); 233 write_lock_bh(&ipgre_lock);
@@ -225,11 +235,11 @@ static void ipgre_tunnel_link(struct ip_tunnel *t)
225 write_unlock_bh(&ipgre_lock); 235 write_unlock_bh(&ipgre_lock);
226} 236}
227 237
228static void ipgre_tunnel_unlink(struct ip_tunnel *t) 238static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
229{ 239{
230 struct ip_tunnel **tp; 240 struct ip_tunnel **tp;
231 241
232 for (tp = ipgre_bucket(t); *tp; tp = &(*tp)->next) { 242 for (tp = ipgre_bucket(ign, t); *tp; tp = &(*tp)->next) {
233 if (t == *tp) { 243 if (t == *tp) {
234 write_lock_bh(&ipgre_lock); 244 write_lock_bh(&ipgre_lock);
235 *tp = t->next; 245 *tp = t->next;
@@ -239,7 +249,8 @@ static void ipgre_tunnel_unlink(struct ip_tunnel *t)
239 } 249 }
240} 250}
241 251
242static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int create) 252static struct ip_tunnel * ipgre_tunnel_locate(struct net *net,
253 struct ip_tunnel_parm *parms, int create)
243{ 254{
244 __be32 remote = parms->iph.daddr; 255 __be32 remote = parms->iph.daddr;
245 __be32 local = parms->iph.saddr; 256 __be32 local = parms->iph.saddr;
@@ -247,8 +258,9 @@ static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int
247 struct ip_tunnel *t, **tp, *nt; 258 struct ip_tunnel *t, **tp, *nt;
248 struct net_device *dev; 259 struct net_device *dev;
249 char name[IFNAMSIZ]; 260 char name[IFNAMSIZ];
261 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
250 262
251 for (tp = __ipgre_bucket(parms); (t = *tp) != NULL; tp = &t->next) { 263 for (tp = __ipgre_bucket(ign, parms); (t = *tp) != NULL; tp = &t->next) {
252 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) { 264 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) {
253 if (key == t->parms.i_key) 265 if (key == t->parms.i_key)
254 return t; 266 return t;
@@ -266,6 +278,8 @@ static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int
266 if (!dev) 278 if (!dev)
267 return NULL; 279 return NULL;
268 280
281 dev_net_set(dev, net);
282
269 if (strchr(name, '%')) { 283 if (strchr(name, '%')) {
270 if (dev_alloc_name(dev, name) < 0) 284 if (dev_alloc_name(dev, name) < 0)
271 goto failed_free; 285 goto failed_free;
@@ -279,7 +293,7 @@ static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int
279 goto failed_free; 293 goto failed_free;
280 294
281 dev_hold(dev); 295 dev_hold(dev);
282 ipgre_tunnel_link(nt); 296 ipgre_tunnel_link(ign, nt);
283 return nt; 297 return nt;
284 298
285failed_free: 299failed_free:
@@ -289,7 +303,10 @@ failed_free:
289 303
290static void ipgre_tunnel_uninit(struct net_device *dev) 304static void ipgre_tunnel_uninit(struct net_device *dev)
291{ 305{
292 ipgre_tunnel_unlink(netdev_priv(dev)); 306 struct net *net = dev_net(dev);
307 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
308
309 ipgre_tunnel_unlink(ign, netdev_priv(dev));
293 dev_put(dev); 310 dev_put(dev);
294} 311}
295 312
@@ -363,7 +380,9 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
363 } 380 }
364 381
365 read_lock(&ipgre_lock); 382 read_lock(&ipgre_lock);
366 t = ipgre_tunnel_lookup(iph->daddr, iph->saddr, (flags&GRE_KEY) ? *(((__be32*)p) + (grehlen>>2) - 1) : 0); 383 t = ipgre_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr,
384 (flags&GRE_KEY) ?
385 *(((__be32*)p) + (grehlen>>2) - 1) : 0);
367 if (t == NULL || t->parms.iph.daddr == 0 || 386 if (t == NULL || t->parms.iph.daddr == 0 ||
368 ipv4_is_multicast(t->parms.iph.daddr)) 387 ipv4_is_multicast(t->parms.iph.daddr))
369 goto out; 388 goto out;
@@ -476,7 +495,7 @@ out:
476 fl.fl4_dst = eiph->saddr; 495 fl.fl4_dst = eiph->saddr;
477 fl.fl4_tos = RT_TOS(eiph->tos); 496 fl.fl4_tos = RT_TOS(eiph->tos);
478 fl.proto = IPPROTO_GRE; 497 fl.proto = IPPROTO_GRE;
479 if (ip_route_output_key(&init_net, &rt, &fl)) { 498 if (ip_route_output_key(dev_net(skb->dev), &rt, &fl)) {
480 kfree_skb(skb2); 499 kfree_skb(skb2);
481 return; 500 return;
482 } 501 }
@@ -489,7 +508,7 @@ out:
489 fl.fl4_dst = eiph->daddr; 508 fl.fl4_dst = eiph->daddr;
490 fl.fl4_src = eiph->saddr; 509 fl.fl4_src = eiph->saddr;
491 fl.fl4_tos = eiph->tos; 510 fl.fl4_tos = eiph->tos;
492 if (ip_route_output_key(&init_net, &rt, &fl) || 511 if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) ||
493 rt->u.dst.dev->type != ARPHRD_IPGRE) { 512 rt->u.dst.dev->type != ARPHRD_IPGRE) {
494 ip_rt_put(rt); 513 ip_rt_put(rt);
495 kfree_skb(skb2); 514 kfree_skb(skb2);
@@ -596,7 +615,8 @@ static int ipgre_rcv(struct sk_buff *skb)
596 } 615 }
597 616
598 read_lock(&ipgre_lock); 617 read_lock(&ipgre_lock);
599 if ((tunnel = ipgre_tunnel_lookup(iph->saddr, iph->daddr, key)) != NULL) { 618 if ((tunnel = ipgre_tunnel_lookup(dev_net(skb->dev),
619 iph->saddr, iph->daddr, key)) != NULL) {
600 secpath_reset(skb); 620 secpath_reset(skb);
601 621
602 skb->protocol = *(__be16*)(h + 2); 622 skb->protocol = *(__be16*)(h + 2);
@@ -619,7 +639,7 @@ static int ipgre_rcv(struct sk_buff *skb)
619#ifdef CONFIG_NET_IPGRE_BROADCAST 639#ifdef CONFIG_NET_IPGRE_BROADCAST
620 if (ipv4_is_multicast(iph->daddr)) { 640 if (ipv4_is_multicast(iph->daddr)) {
621 /* Looped back packet, drop it! */ 641 /* Looped back packet, drop it! */
622 if (((struct rtable*)skb->dst)->fl.iif == 0) 642 if (skb->rtable->fl.iif == 0)
623 goto drop; 643 goto drop;
624 tunnel->stat.multicast++; 644 tunnel->stat.multicast++;
625 skb->pkt_type = PACKET_BROADCAST; 645 skb->pkt_type = PACKET_BROADCAST;
@@ -699,7 +719,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
699 } 719 }
700 720
701 if (skb->protocol == htons(ETH_P_IP)) { 721 if (skb->protocol == htons(ETH_P_IP)) {
702 rt = (struct rtable*)skb->dst; 722 rt = skb->rtable;
703 if ((dst = rt->rt_gateway) == 0) 723 if ((dst = rt->rt_gateway) == 0)
704 goto tx_error_icmp; 724 goto tx_error_icmp;
705 } 725 }
@@ -744,7 +764,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
744 .saddr = tiph->saddr, 764 .saddr = tiph->saddr,
745 .tos = RT_TOS(tos) } }, 765 .tos = RT_TOS(tos) } },
746 .proto = IPPROTO_GRE }; 766 .proto = IPPROTO_GRE };
747 if (ip_route_output_key(&init_net, &rt, &fl)) { 767 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
748 tunnel->stat.tx_carrier_errors++; 768 tunnel->stat.tx_carrier_errors++;
749 goto tx_error; 769 goto tx_error;
750 } 770 }
@@ -917,7 +937,7 @@ static void ipgre_tunnel_bind_dev(struct net_device *dev)
917 .tos = RT_TOS(iph->tos) } }, 937 .tos = RT_TOS(iph->tos) } },
918 .proto = IPPROTO_GRE }; 938 .proto = IPPROTO_GRE };
919 struct rtable *rt; 939 struct rtable *rt;
920 if (!ip_route_output_key(&init_net, &rt, &fl)) { 940 if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
921 tdev = rt->u.dst.dev; 941 tdev = rt->u.dst.dev;
922 ip_rt_put(rt); 942 ip_rt_put(rt);
923 } 943 }
@@ -925,7 +945,7 @@ static void ipgre_tunnel_bind_dev(struct net_device *dev)
925 } 945 }
926 946
927 if (!tdev && tunnel->parms.link) 947 if (!tdev && tunnel->parms.link)
928 tdev = __dev_get_by_index(&init_net, tunnel->parms.link); 948 tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
929 949
930 if (tdev) { 950 if (tdev) {
931 hlen = tdev->hard_header_len; 951 hlen = tdev->hard_header_len;
@@ -954,16 +974,18 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
954 int err = 0; 974 int err = 0;
955 struct ip_tunnel_parm p; 975 struct ip_tunnel_parm p;
956 struct ip_tunnel *t; 976 struct ip_tunnel *t;
977 struct net *net = dev_net(dev);
978 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
957 979
958 switch (cmd) { 980 switch (cmd) {
959 case SIOCGETTUNNEL: 981 case SIOCGETTUNNEL:
960 t = NULL; 982 t = NULL;
961 if (dev == ipgre_fb_tunnel_dev) { 983 if (dev == ign->fb_tunnel_dev) {
962 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) { 984 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
963 err = -EFAULT; 985 err = -EFAULT;
964 break; 986 break;
965 } 987 }
966 t = ipgre_tunnel_locate(&p, 0); 988 t = ipgre_tunnel_locate(net, &p, 0);
967 } 989 }
968 if (t == NULL) 990 if (t == NULL)
969 t = netdev_priv(dev); 991 t = netdev_priv(dev);
@@ -995,9 +1017,9 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
995 if (!(p.o_flags&GRE_KEY)) 1017 if (!(p.o_flags&GRE_KEY))
996 p.o_key = 0; 1018 p.o_key = 0;
997 1019
998 t = ipgre_tunnel_locate(&p, cmd == SIOCADDTUNNEL); 1020 t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
999 1021
1000 if (dev != ipgre_fb_tunnel_dev && cmd == SIOCCHGTUNNEL) { 1022 if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
1001 if (t != NULL) { 1023 if (t != NULL) {
1002 if (t->dev != dev) { 1024 if (t->dev != dev) {
1003 err = -EEXIST; 1025 err = -EEXIST;
@@ -1017,14 +1039,14 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
1017 err = -EINVAL; 1039 err = -EINVAL;
1018 break; 1040 break;
1019 } 1041 }
1020 ipgre_tunnel_unlink(t); 1042 ipgre_tunnel_unlink(ign, t);
1021 t->parms.iph.saddr = p.iph.saddr; 1043 t->parms.iph.saddr = p.iph.saddr;
1022 t->parms.iph.daddr = p.iph.daddr; 1044 t->parms.iph.daddr = p.iph.daddr;
1023 t->parms.i_key = p.i_key; 1045 t->parms.i_key = p.i_key;
1024 t->parms.o_key = p.o_key; 1046 t->parms.o_key = p.o_key;
1025 memcpy(dev->dev_addr, &p.iph.saddr, 4); 1047 memcpy(dev->dev_addr, &p.iph.saddr, 4);
1026 memcpy(dev->broadcast, &p.iph.daddr, 4); 1048 memcpy(dev->broadcast, &p.iph.daddr, 4);
1027 ipgre_tunnel_link(t); 1049 ipgre_tunnel_link(ign, t);
1028 netdev_state_change(dev); 1050 netdev_state_change(dev);
1029 } 1051 }
1030 } 1052 }
@@ -1052,15 +1074,15 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
1052 if (!capable(CAP_NET_ADMIN)) 1074 if (!capable(CAP_NET_ADMIN))
1053 goto done; 1075 goto done;
1054 1076
1055 if (dev == ipgre_fb_tunnel_dev) { 1077 if (dev == ign->fb_tunnel_dev) {
1056 err = -EFAULT; 1078 err = -EFAULT;
1057 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) 1079 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1058 goto done; 1080 goto done;
1059 err = -ENOENT; 1081 err = -ENOENT;
1060 if ((t = ipgre_tunnel_locate(&p, 0)) == NULL) 1082 if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
1061 goto done; 1083 goto done;
1062 err = -EPERM; 1084 err = -EPERM;
1063 if (t == netdev_priv(ipgre_fb_tunnel_dev)) 1085 if (t == netdev_priv(ign->fb_tunnel_dev))
1064 goto done; 1086 goto done;
1065 dev = t->dev; 1087 dev = t->dev;
1066 } 1088 }
@@ -1173,7 +1195,7 @@ static int ipgre_open(struct net_device *dev)
1173 .tos = RT_TOS(t->parms.iph.tos) } }, 1195 .tos = RT_TOS(t->parms.iph.tos) } },
1174 .proto = IPPROTO_GRE }; 1196 .proto = IPPROTO_GRE };
1175 struct rtable *rt; 1197 struct rtable *rt;
1176 if (ip_route_output_key(&init_net, &rt, &fl)) 1198 if (ip_route_output_key(dev_net(dev), &rt, &fl))
1177 return -EADDRNOTAVAIL; 1199 return -EADDRNOTAVAIL;
1178 dev = rt->u.dst.dev; 1200 dev = rt->u.dst.dev;
1179 ip_rt_put(rt); 1201 ip_rt_put(rt);
@@ -1190,7 +1212,7 @@ static int ipgre_close(struct net_device *dev)
1190 struct ip_tunnel *t = netdev_priv(dev); 1212 struct ip_tunnel *t = netdev_priv(dev);
1191 if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) { 1213 if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
1192 struct in_device *in_dev; 1214 struct in_device *in_dev;
1193 in_dev = inetdev_by_index(dev->nd_net, t->mlink); 1215 in_dev = inetdev_by_index(dev_net(dev), t->mlink);
1194 if (in_dev) { 1216 if (in_dev) {
1195 ip_mc_dec_group(in_dev, t->parms.iph.daddr); 1217 ip_mc_dec_group(in_dev, t->parms.iph.daddr);
1196 in_dev_put(in_dev); 1218 in_dev_put(in_dev);
@@ -1216,6 +1238,7 @@ static void ipgre_tunnel_setup(struct net_device *dev)
1216 dev->flags = IFF_NOARP; 1238 dev->flags = IFF_NOARP;
1217 dev->iflink = 0; 1239 dev->iflink = 0;
1218 dev->addr_len = 4; 1240 dev->addr_len = 4;
1241 dev->features |= NETIF_F_NETNS_LOCAL;
1219} 1242}
1220 1243
1221static int ipgre_tunnel_init(struct net_device *dev) 1244static int ipgre_tunnel_init(struct net_device *dev)
@@ -1251,10 +1274,11 @@ static int ipgre_tunnel_init(struct net_device *dev)
1251 return 0; 1274 return 0;
1252} 1275}
1253 1276
1254static int __init ipgre_fb_tunnel_init(struct net_device *dev) 1277static int ipgre_fb_tunnel_init(struct net_device *dev)
1255{ 1278{
1256 struct ip_tunnel *tunnel = netdev_priv(dev); 1279 struct ip_tunnel *tunnel = netdev_priv(dev);
1257 struct iphdr *iph = &tunnel->parms.iph; 1280 struct iphdr *iph = &tunnel->parms.iph;
1281 struct ipgre_net *ign = net_generic(dev_net(dev), ipgre_net_id);
1258 1282
1259 tunnel->dev = dev; 1283 tunnel->dev = dev;
1260 strcpy(tunnel->parms.name, dev->name); 1284 strcpy(tunnel->parms.name, dev->name);
@@ -1265,7 +1289,7 @@ static int __init ipgre_fb_tunnel_init(struct net_device *dev)
1265 tunnel->hlen = sizeof(struct iphdr) + 4; 1289 tunnel->hlen = sizeof(struct iphdr) + 4;
1266 1290
1267 dev_hold(dev); 1291 dev_hold(dev);
1268 tunnels_wc[0] = tunnel; 1292 ign->tunnels_wc[0] = tunnel;
1269 return 0; 1293 return 0;
1270} 1294}
1271 1295
@@ -1273,56 +1297,98 @@ static int __init ipgre_fb_tunnel_init(struct net_device *dev)
1273static struct net_protocol ipgre_protocol = { 1297static struct net_protocol ipgre_protocol = {
1274 .handler = ipgre_rcv, 1298 .handler = ipgre_rcv,
1275 .err_handler = ipgre_err, 1299 .err_handler = ipgre_err,
1300 .netns_ok = 1,
1276}; 1301};
1277 1302
1303static void ipgre_destroy_tunnels(struct ipgre_net *ign)
1304{
1305 int prio;
1278 1306
1279/* 1307 for (prio = 0; prio < 4; prio++) {
1280 * And now the modules code and kernel interface. 1308 int h;
1281 */ 1309 for (h = 0; h < HASH_SIZE; h++) {
1310 struct ip_tunnel *t;
1311 while ((t = ign->tunnels[prio][h]) != NULL)
1312 unregister_netdevice(t->dev);
1313 }
1314 }
1315}
1282 1316
1283static int __init ipgre_init(void) 1317static int ipgre_init_net(struct net *net)
1284{ 1318{
1285 int err; 1319 int err;
1320 struct ipgre_net *ign;
1286 1321
1287 printk(KERN_INFO "GRE over IPv4 tunneling driver\n"); 1322 err = -ENOMEM;
1323 ign = kzalloc(sizeof(struct ipgre_net), GFP_KERNEL);
1324 if (ign == NULL)
1325 goto err_alloc;
1288 1326
1289 if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) { 1327 err = net_assign_generic(net, ipgre_net_id, ign);
1290 printk(KERN_INFO "ipgre init: can't add protocol\n"); 1328 if (err < 0)
1291 return -EAGAIN; 1329 goto err_assign;
1292 }
1293 1330
1294 ipgre_fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0", 1331 ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
1295 ipgre_tunnel_setup); 1332 ipgre_tunnel_setup);
1296 if (!ipgre_fb_tunnel_dev) { 1333 if (!ign->fb_tunnel_dev) {
1297 err = -ENOMEM; 1334 err = -ENOMEM;
1298 goto err1; 1335 goto err_alloc_dev;
1299 } 1336 }
1300 1337
1301 ipgre_fb_tunnel_dev->init = ipgre_fb_tunnel_init; 1338 ign->fb_tunnel_dev->init = ipgre_fb_tunnel_init;
1339 dev_net_set(ign->fb_tunnel_dev, net);
1302 1340
1303 if ((err = register_netdev(ipgre_fb_tunnel_dev))) 1341 if ((err = register_netdev(ign->fb_tunnel_dev)))
1304 goto err2; 1342 goto err_reg_dev;
1305out: 1343
1344 return 0;
1345
1346err_reg_dev:
1347 free_netdev(ign->fb_tunnel_dev);
1348err_alloc_dev:
1349 /* nothing */
1350err_assign:
1351 kfree(ign);
1352err_alloc:
1306 return err; 1353 return err;
1307err2:
1308 free_netdev(ipgre_fb_tunnel_dev);
1309err1:
1310 inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
1311 goto out;
1312} 1354}
1313 1355
1314static void __exit ipgre_destroy_tunnels(void) 1356static void ipgre_exit_net(struct net *net)
1315{ 1357{
1316 int prio; 1358 struct ipgre_net *ign;
1317 1359
1318 for (prio = 0; prio < 4; prio++) { 1360 ign = net_generic(net, ipgre_net_id);
1319 int h; 1361 rtnl_lock();
1320 for (h = 0; h < HASH_SIZE; h++) { 1362 ipgre_destroy_tunnels(ign);
1321 struct ip_tunnel *t; 1363 rtnl_unlock();
1322 while ((t = tunnels[prio][h]) != NULL) 1364 kfree(ign);
1323 unregister_netdevice(t->dev); 1365}
1324 } 1366
1367static struct pernet_operations ipgre_net_ops = {
1368 .init = ipgre_init_net,
1369 .exit = ipgre_exit_net,
1370};
1371
1372/*
1373 * And now the modules code and kernel interface.
1374 */
1375
1376static int __init ipgre_init(void)
1377{
1378 int err;
1379
1380 printk(KERN_INFO "GRE over IPv4 tunneling driver\n");
1381
1382 if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) {
1383 printk(KERN_INFO "ipgre init: can't add protocol\n");
1384 return -EAGAIN;
1325 } 1385 }
1386
1387 err = register_pernet_gen_device(&ipgre_net_id, &ipgre_net_ops);
1388 if (err < 0)
1389 inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
1390
1391 return err;
1326} 1392}
1327 1393
1328static void __exit ipgre_fini(void) 1394static void __exit ipgre_fini(void)
@@ -1330,9 +1396,7 @@ static void __exit ipgre_fini(void)
1330 if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) 1396 if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
1331 printk(KERN_INFO "ipgre close: can't remove protocol\n"); 1397 printk(KERN_INFO "ipgre close: can't remove protocol\n");
1332 1398
1333 rtnl_lock(); 1399 unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
1334 ipgre_destroy_tunnels();
1335 rtnl_unlock();
1336} 1400}
1337 1401
1338module_init(ipgre_init); 1402module_init(ipgre_init);
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 65631391d479..7b4bad6d572f 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -160,6 +160,7 @@ int ip_call_ra_chain(struct sk_buff *skb)
160 struct ip_ra_chain *ra; 160 struct ip_ra_chain *ra;
161 u8 protocol = ip_hdr(skb)->protocol; 161 u8 protocol = ip_hdr(skb)->protocol;
162 struct sock *last = NULL; 162 struct sock *last = NULL;
163 struct net_device *dev = skb->dev;
163 164
164 read_lock(&ip_ra_lock); 165 read_lock(&ip_ra_lock);
165 for (ra = ip_ra_chain; ra; ra = ra->next) { 166 for (ra = ip_ra_chain; ra; ra = ra->next) {
@@ -170,7 +171,8 @@ int ip_call_ra_chain(struct sk_buff *skb)
170 */ 171 */
171 if (sk && inet_sk(sk)->num == protocol && 172 if (sk && inet_sk(sk)->num == protocol &&
172 (!sk->sk_bound_dev_if || 173 (!sk->sk_bound_dev_if ||
173 sk->sk_bound_dev_if == skb->dev->ifindex)) { 174 sk->sk_bound_dev_if == dev->ifindex) &&
175 sock_net(sk) == dev_net(dev)) {
174 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { 176 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
175 if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) { 177 if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) {
176 read_unlock(&ip_ra_lock); 178 read_unlock(&ip_ra_lock);
@@ -197,6 +199,8 @@ int ip_call_ra_chain(struct sk_buff *skb)
197 199
198static int ip_local_deliver_finish(struct sk_buff *skb) 200static int ip_local_deliver_finish(struct sk_buff *skb)
199{ 201{
202 struct net *net = dev_net(skb->dev);
203
200 __skb_pull(skb, ip_hdrlen(skb)); 204 __skb_pull(skb, ip_hdrlen(skb));
201 205
202 /* Point into the IP datagram, just past the header. */ 206 /* Point into the IP datagram, just past the header. */
@@ -212,7 +216,8 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
212 raw = raw_local_deliver(skb, protocol); 216 raw = raw_local_deliver(skb, protocol);
213 217
214 hash = protocol & (MAX_INET_PROTOS - 1); 218 hash = protocol & (MAX_INET_PROTOS - 1);
215 if ((ipprot = rcu_dereference(inet_protos[hash])) != NULL) { 219 ipprot = rcu_dereference(inet_protos[hash]);
220 if (ipprot != NULL && (net == &init_net || ipprot->netns_ok)) {
216 int ret; 221 int ret;
217 222
218 if (!ipprot->no_policy) { 223 if (!ipprot->no_policy) {
@@ -283,13 +288,14 @@ static inline int ip_rcv_options(struct sk_buff *skb)
283 } 288 }
284 289
285 iph = ip_hdr(skb); 290 iph = ip_hdr(skb);
291 opt = &(IPCB(skb)->opt);
292 opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
286 293
287 if (ip_options_compile(NULL, skb)) { 294 if (ip_options_compile(dev_net(dev), opt, skb)) {
288 IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); 295 IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
289 goto drop; 296 goto drop;
290 } 297 }
291 298
292 opt = &(IPCB(skb)->opt);
293 if (unlikely(opt->srr)) { 299 if (unlikely(opt->srr)) {
294 struct in_device *in_dev = in_dev_get(dev); 300 struct in_device *in_dev = in_dev_get(dev);
295 if (in_dev) { 301 if (in_dev) {
@@ -297,7 +303,7 @@ static inline int ip_rcv_options(struct sk_buff *skb)
297 if (IN_DEV_LOG_MARTIANS(in_dev) && 303 if (IN_DEV_LOG_MARTIANS(in_dev) &&
298 net_ratelimit()) 304 net_ratelimit())
299 printk(KERN_INFO "source route option " 305 printk(KERN_INFO "source route option "
300 "%u.%u.%u.%u -> %u.%u.%u.%u\n", 306 NIPQUAD_FMT " -> " NIPQUAD_FMT "\n",
301 NIPQUAD(iph->saddr), 307 NIPQUAD(iph->saddr),
302 NIPQUAD(iph->daddr)); 308 NIPQUAD(iph->daddr));
303 in_dev_put(in_dev); 309 in_dev_put(in_dev);
@@ -351,7 +357,7 @@ static int ip_rcv_finish(struct sk_buff *skb)
351 if (iph->ihl > 5 && ip_rcv_options(skb)) 357 if (iph->ihl > 5 && ip_rcv_options(skb))
352 goto drop; 358 goto drop;
353 359
354 rt = (struct rtable*)skb->dst; 360 rt = skb->rtable;
355 if (rt->rt_type == RTN_MULTICAST) 361 if (rt->rt_type == RTN_MULTICAST)
356 IP_INC_STATS_BH(IPSTATS_MIB_INMCASTPKTS); 362 IP_INC_STATS_BH(IPSTATS_MIB_INMCASTPKTS);
357 else if (rt->rt_type == RTN_BROADCAST) 363 else if (rt->rt_type == RTN_BROADCAST)
@@ -372,9 +378,6 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
372 struct iphdr *iph; 378 struct iphdr *iph;
373 u32 len; 379 u32 len;
374 380
375 if (dev->nd_net != &init_net)
376 goto drop;
377
378 /* When the interface is in promisc. mode, drop all the crap 381 /* When the interface is in promisc. mode, drop all the crap
379 * that it receives, do not try to analyse it. 382 * that it receives, do not try to analyse it.
380 */ 383 */
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 4d315158fd3c..d107543d3f81 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -45,7 +45,6 @@ void ip_options_build(struct sk_buff * skb, struct ip_options * opt,
45 memcpy(&(IPCB(skb)->opt), opt, sizeof(struct ip_options)); 45 memcpy(&(IPCB(skb)->opt), opt, sizeof(struct ip_options));
46 memcpy(iph+sizeof(struct iphdr), opt->__data, opt->optlen); 46 memcpy(iph+sizeof(struct iphdr), opt->__data, opt->optlen);
47 opt = &(IPCB(skb)->opt); 47 opt = &(IPCB(skb)->opt);
48 opt->is_data = 0;
49 48
50 if (opt->srr) 49 if (opt->srr)
51 memcpy(iph+opt->srr+iph[opt->srr+1]-4, &daddr, 4); 50 memcpy(iph+opt->srr+iph[opt->srr+1]-4, &daddr, 4);
@@ -95,8 +94,6 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
95 94
96 memset(dopt, 0, sizeof(struct ip_options)); 95 memset(dopt, 0, sizeof(struct ip_options));
97 96
98 dopt->is_data = 1;
99
100 sopt = &(IPCB(skb)->opt); 97 sopt = &(IPCB(skb)->opt);
101 98
102 if (sopt->optlen == 0) { 99 if (sopt->optlen == 0) {
@@ -107,10 +104,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
107 sptr = skb_network_header(skb); 104 sptr = skb_network_header(skb);
108 dptr = dopt->__data; 105 dptr = dopt->__data;
109 106
110 if (skb->dst) 107 daddr = skb->rtable->rt_spec_dst;
111 daddr = ((struct rtable*)skb->dst)->rt_spec_dst;
112 else
113 daddr = ip_hdr(skb)->daddr;
114 108
115 if (sopt->rr) { 109 if (sopt->rr) {
116 optlen = sptr[sopt->rr+1]; 110 optlen = sptr[sopt->rr+1];
@@ -151,7 +145,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
151 __be32 addr; 145 __be32 addr;
152 146
153 memcpy(&addr, sptr+soffset-1, 4); 147 memcpy(&addr, sptr+soffset-1, 4);
154 if (inet_addr_type(&init_net, addr) != RTN_LOCAL) { 148 if (inet_addr_type(dev_net(skb->dst->dev), addr) != RTN_LOCAL) {
155 dopt->ts_needtime = 1; 149 dopt->ts_needtime = 1;
156 soffset += 8; 150 soffset += 8;
157 } 151 }
@@ -254,26 +248,22 @@ void ip_options_fragment(struct sk_buff * skb)
254 * If opt == NULL, then skb->data should point to IP header. 248 * If opt == NULL, then skb->data should point to IP header.
255 */ 249 */
256 250
257int ip_options_compile(struct ip_options * opt, struct sk_buff * skb) 251int ip_options_compile(struct net *net,
252 struct ip_options * opt, struct sk_buff * skb)
258{ 253{
259 int l; 254 int l;
260 unsigned char * iph; 255 unsigned char * iph;
261 unsigned char * optptr; 256 unsigned char * optptr;
262 int optlen; 257 int optlen;
263 unsigned char * pp_ptr = NULL; 258 unsigned char * pp_ptr = NULL;
264 struct rtable *rt = skb ? (struct rtable*)skb->dst : NULL; 259 struct rtable *rt = NULL;
265 260
266 if (!opt) { 261 if (skb != NULL) {
267 opt = &(IPCB(skb)->opt); 262 rt = skb->rtable;
268 iph = skb_network_header(skb); 263 optptr = (unsigned char *)&(ip_hdr(skb)[1]);
269 opt->optlen = ((struct iphdr *)iph)->ihl*4 - sizeof(struct iphdr); 264 } else
270 optptr = iph + sizeof(struct iphdr); 265 optptr = opt->__data;
271 opt->is_data = 0; 266 iph = optptr - sizeof(struct iphdr);
272 } else {
273 optptr = opt->is_data ? opt->__data :
274 (unsigned char *)&(ip_hdr(skb)[1]);
275 iph = optptr - sizeof(struct iphdr);
276 }
277 267
278 for (l = opt->optlen; l > 0; ) { 268 for (l = opt->optlen; l > 0; ) {
279 switch (*optptr) { 269 switch (*optptr) {
@@ -400,7 +390,7 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb)
400 { 390 {
401 __be32 addr; 391 __be32 addr;
402 memcpy(&addr, &optptr[optptr[2]-1], 4); 392 memcpy(&addr, &optptr[optptr[2]-1], 4);
403 if (inet_addr_type(&init_net, addr) == RTN_UNICAST) 393 if (inet_addr_type(net, addr) == RTN_UNICAST)
404 break; 394 break;
405 if (skb) 395 if (skb)
406 timeptr = (__be32*)&optptr[optptr[2]+3]; 396 timeptr = (__be32*)&optptr[optptr[2]+3];
@@ -517,14 +507,13 @@ static struct ip_options *ip_options_get_alloc(const int optlen)
517 GFP_KERNEL); 507 GFP_KERNEL);
518} 508}
519 509
520static int ip_options_get_finish(struct ip_options **optp, 510static int ip_options_get_finish(struct net *net, struct ip_options **optp,
521 struct ip_options *opt, int optlen) 511 struct ip_options *opt, int optlen)
522{ 512{
523 while (optlen & 3) 513 while (optlen & 3)
524 opt->__data[optlen++] = IPOPT_END; 514 opt->__data[optlen++] = IPOPT_END;
525 opt->optlen = optlen; 515 opt->optlen = optlen;
526 opt->is_data = 1; 516 if (optlen && ip_options_compile(net, opt, NULL)) {
527 if (optlen && ip_options_compile(opt, NULL)) {
528 kfree(opt); 517 kfree(opt);
529 return -EINVAL; 518 return -EINVAL;
530 } 519 }
@@ -533,7 +522,8 @@ static int ip_options_get_finish(struct ip_options **optp,
533 return 0; 522 return 0;
534} 523}
535 524
536int ip_options_get_from_user(struct ip_options **optp, unsigned char __user *data, int optlen) 525int ip_options_get_from_user(struct net *net, struct ip_options **optp,
526 unsigned char __user *data, int optlen)
537{ 527{
538 struct ip_options *opt = ip_options_get_alloc(optlen); 528 struct ip_options *opt = ip_options_get_alloc(optlen);
539 529
@@ -543,10 +533,11 @@ int ip_options_get_from_user(struct ip_options **optp, unsigned char __user *dat
543 kfree(opt); 533 kfree(opt);
544 return -EFAULT; 534 return -EFAULT;
545 } 535 }
546 return ip_options_get_finish(optp, opt, optlen); 536 return ip_options_get_finish(net, optp, opt, optlen);
547} 537}
548 538
549int ip_options_get(struct ip_options **optp, unsigned char *data, int optlen) 539int ip_options_get(struct net *net, struct ip_options **optp,
540 unsigned char *data, int optlen)
550{ 541{
551 struct ip_options *opt = ip_options_get_alloc(optlen); 542 struct ip_options *opt = ip_options_get_alloc(optlen);
552 543
@@ -554,14 +545,14 @@ int ip_options_get(struct ip_options **optp, unsigned char *data, int optlen)
554 return -ENOMEM; 545 return -ENOMEM;
555 if (optlen) 546 if (optlen)
556 memcpy(opt->__data, data, optlen); 547 memcpy(opt->__data, data, optlen);
557 return ip_options_get_finish(optp, opt, optlen); 548 return ip_options_get_finish(net, optp, opt, optlen);
558} 549}
559 550
560void ip_forward_options(struct sk_buff *skb) 551void ip_forward_options(struct sk_buff *skb)
561{ 552{
562 struct ip_options * opt = &(IPCB(skb)->opt); 553 struct ip_options * opt = &(IPCB(skb)->opt);
563 unsigned char * optptr; 554 unsigned char * optptr;
564 struct rtable *rt = (struct rtable*)skb->dst; 555 struct rtable *rt = skb->rtable;
565 unsigned char *raw = skb_network_header(skb); 556 unsigned char *raw = skb_network_header(skb);
566 557
567 if (opt->rr_needaddr) { 558 if (opt->rr_needaddr) {
@@ -609,7 +600,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
609 __be32 nexthop; 600 __be32 nexthop;
610 struct iphdr *iph = ip_hdr(skb); 601 struct iphdr *iph = ip_hdr(skb);
611 unsigned char *optptr = skb_network_header(skb) + opt->srr; 602 unsigned char *optptr = skb_network_header(skb) + opt->srr;
612 struct rtable *rt = (struct rtable*)skb->dst; 603 struct rtable *rt = skb->rtable;
613 struct rtable *rt2; 604 struct rtable *rt2;
614 int err; 605 int err;
615 606
@@ -634,13 +625,13 @@ int ip_options_rcv_srr(struct sk_buff *skb)
634 } 625 }
635 memcpy(&nexthop, &optptr[srrptr-1], 4); 626 memcpy(&nexthop, &optptr[srrptr-1], 4);
636 627
637 rt = (struct rtable*)skb->dst; 628 rt = skb->rtable;
638 skb->dst = NULL; 629 skb->rtable = NULL;
639 err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev); 630 err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
640 rt2 = (struct rtable*)skb->dst; 631 rt2 = skb->rtable;
641 if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { 632 if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
642 ip_rt_put(rt2); 633 ip_rt_put(rt2);
643 skb->dst = &rt->u.dst; 634 skb->rtable = rt;
644 return -EINVAL; 635 return -EINVAL;
645 } 636 }
646 ip_rt_put(rt); 637 ip_rt_put(rt);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 341779e685d9..08349267ceb4 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -142,7 +142,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
142 __be32 saddr, __be32 daddr, struct ip_options *opt) 142 __be32 saddr, __be32 daddr, struct ip_options *opt)
143{ 143{
144 struct inet_sock *inet = inet_sk(sk); 144 struct inet_sock *inet = inet_sk(sk);
145 struct rtable *rt = (struct rtable *)skb->dst; 145 struct rtable *rt = skb->rtable;
146 struct iphdr *iph; 146 struct iphdr *iph;
147 147
148 /* Build the IP header. */ 148 /* Build the IP header. */
@@ -240,7 +240,7 @@ static int ip_finish_output(struct sk_buff *skb)
240int ip_mc_output(struct sk_buff *skb) 240int ip_mc_output(struct sk_buff *skb)
241{ 241{
242 struct sock *sk = skb->sk; 242 struct sock *sk = skb->sk;
243 struct rtable *rt = (struct rtable*)skb->dst; 243 struct rtable *rt = skb->rtable;
244 struct net_device *dev = rt->u.dst.dev; 244 struct net_device *dev = rt->u.dst.dev;
245 245
246 /* 246 /*
@@ -321,7 +321,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
321 /* Skip all of this if the packet is already routed, 321 /* Skip all of this if the packet is already routed,
322 * f.e. by something like SCTP. 322 * f.e. by something like SCTP.
323 */ 323 */
324 rt = (struct rtable *) skb->dst; 324 rt = skb->rtable;
325 if (rt != NULL) 325 if (rt != NULL)
326 goto packet_routed; 326 goto packet_routed;
327 327
@@ -351,7 +351,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
351 * itself out. 351 * itself out.
352 */ 352 */
353 security_sk_classify_flow(sk, &fl); 353 security_sk_classify_flow(sk, &fl);
354 if (ip_route_output_flow(&init_net, &rt, &fl, sk, 0)) 354 if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
355 goto no_route; 355 goto no_route;
356 } 356 }
357 sk_setup_caps(sk, &rt->u.dst); 357 sk_setup_caps(sk, &rt->u.dst);
@@ -441,7 +441,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
441 unsigned int mtu, hlen, left, len, ll_rs, pad; 441 unsigned int mtu, hlen, left, len, ll_rs, pad;
442 int offset; 442 int offset;
443 __be16 not_last_frag; 443 __be16 not_last_frag;
444 struct rtable *rt = (struct rtable*)skb->dst; 444 struct rtable *rt = skb->rtable;
445 int err = 0; 445 int err = 0;
446 446
447 dev = rt->u.dst.dev; 447 dev = rt->u.dst.dev;
@@ -825,7 +825,7 @@ int ip_append_data(struct sock *sk,
825 inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ? 825 inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
826 rt->u.dst.dev->mtu : 826 rt->u.dst.dev->mtu :
827 dst_mtu(rt->u.dst.path); 827 dst_mtu(rt->u.dst.path);
828 inet->cork.rt = rt; 828 inet->cork.dst = &rt->u.dst;
829 inet->cork.length = 0; 829 inet->cork.length = 0;
830 sk->sk_sndmsg_page = NULL; 830 sk->sk_sndmsg_page = NULL;
831 sk->sk_sndmsg_off = 0; 831 sk->sk_sndmsg_off = 0;
@@ -834,7 +834,7 @@ int ip_append_data(struct sock *sk,
834 transhdrlen += exthdrlen; 834 transhdrlen += exthdrlen;
835 } 835 }
836 } else { 836 } else {
837 rt = inet->cork.rt; 837 rt = (struct rtable *)inet->cork.dst;
838 if (inet->cork.flags & IPCORK_OPT) 838 if (inet->cork.flags & IPCORK_OPT)
839 opt = inet->cork.opt; 839 opt = inet->cork.opt;
840 840
@@ -1083,7 +1083,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
1083 if (skb_queue_empty(&sk->sk_write_queue)) 1083 if (skb_queue_empty(&sk->sk_write_queue))
1084 return -EINVAL; 1084 return -EINVAL;
1085 1085
1086 rt = inet->cork.rt; 1086 rt = (struct rtable *)inet->cork.dst;
1087 if (inet->cork.flags & IPCORK_OPT) 1087 if (inet->cork.flags & IPCORK_OPT)
1088 opt = inet->cork.opt; 1088 opt = inet->cork.opt;
1089 1089
@@ -1208,10 +1208,8 @@ static void ip_cork_release(struct inet_sock *inet)
1208 inet->cork.flags &= ~IPCORK_OPT; 1208 inet->cork.flags &= ~IPCORK_OPT;
1209 kfree(inet->cork.opt); 1209 kfree(inet->cork.opt);
1210 inet->cork.opt = NULL; 1210 inet->cork.opt = NULL;
1211 if (inet->cork.rt) { 1211 dst_release(inet->cork.dst);
1212 ip_rt_put(inet->cork.rt); 1212 inet->cork.dst = NULL;
1213 inet->cork.rt = NULL;
1214 }
1215} 1213}
1216 1214
1217/* 1215/*
@@ -1224,7 +1222,7 @@ int ip_push_pending_frames(struct sock *sk)
1224 struct sk_buff **tail_skb; 1222 struct sk_buff **tail_skb;
1225 struct inet_sock *inet = inet_sk(sk); 1223 struct inet_sock *inet = inet_sk(sk);
1226 struct ip_options *opt = NULL; 1224 struct ip_options *opt = NULL;
1227 struct rtable *rt = inet->cork.rt; 1225 struct rtable *rt = (struct rtable *)inet->cork.dst;
1228 struct iphdr *iph; 1226 struct iphdr *iph;
1229 __be16 df = 0; 1227 __be16 df = 0;
1230 __u8 ttl; 1228 __u8 ttl;
@@ -1357,7 +1355,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
1357 } replyopts; 1355 } replyopts;
1358 struct ipcm_cookie ipc; 1356 struct ipcm_cookie ipc;
1359 __be32 daddr; 1357 __be32 daddr;
1360 struct rtable *rt = (struct rtable*)skb->dst; 1358 struct rtable *rt = skb->rtable;
1361 1359
1362 if (ip_options_echo(&replyopts.opt, skb)) 1360 if (ip_options_echo(&replyopts.opt, skb))
1363 return; 1361 return;
@@ -1384,7 +1382,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
1384 .dport = tcp_hdr(skb)->source } }, 1382 .dport = tcp_hdr(skb)->source } },
1385 .proto = sk->sk_protocol }; 1383 .proto = sk->sk_protocol };
1386 security_skb_classify_flow(skb, &fl); 1384 security_skb_classify_flow(skb, &fl);
1387 if (ip_route_output_key(sk->sk_net, &rt, &fl)) 1385 if (ip_route_output_key(sock_net(sk), &rt, &fl))
1388 return; 1386 return;
1389 } 1387 }
1390 1388
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index c2921d01e925..d8adfd4972e2 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -57,7 +57,7 @@
57static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) 57static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
58{ 58{
59 struct in_pktinfo info; 59 struct in_pktinfo info;
60 struct rtable *rt = (struct rtable *)skb->dst; 60 struct rtable *rt = skb->rtable;
61 61
62 info.ipi_addr.s_addr = ip_hdr(skb)->daddr; 62 info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
63 if (rt) { 63 if (rt) {
@@ -163,7 +163,7 @@ void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
163 ip_cmsg_recv_security(msg, skb); 163 ip_cmsg_recv_security(msg, skb);
164} 164}
165 165
166int ip_cmsg_send(struct msghdr *msg, struct ipcm_cookie *ipc) 166int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
167{ 167{
168 int err; 168 int err;
169 struct cmsghdr *cmsg; 169 struct cmsghdr *cmsg;
@@ -176,7 +176,7 @@ int ip_cmsg_send(struct msghdr *msg, struct ipcm_cookie *ipc)
176 switch (cmsg->cmsg_type) { 176 switch (cmsg->cmsg_type) {
177 case IP_RETOPTS: 177 case IP_RETOPTS:
178 err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)); 178 err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
179 err = ip_options_get(&ipc->opt, CMSG_DATA(cmsg), err < 40 ? err : 40); 179 err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg), err < 40 ? err : 40);
180 if (err) 180 if (err)
181 return err; 181 return err;
182 break; 182 break;
@@ -449,7 +449,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
449 struct ip_options * opt = NULL; 449 struct ip_options * opt = NULL;
450 if (optlen > 40 || optlen < 0) 450 if (optlen > 40 || optlen < 0)
451 goto e_inval; 451 goto e_inval;
452 err = ip_options_get_from_user(&opt, optval, optlen); 452 err = ip_options_get_from_user(sock_net(sk), &opt,
453 optval, optlen);
453 if (err) 454 if (err)
454 break; 455 break;
455 if (inet->is_icsk) { 456 if (inet->is_icsk) {
@@ -589,13 +590,13 @@ static int do_ip_setsockopt(struct sock *sk, int level,
589 err = 0; 590 err = 0;
590 break; 591 break;
591 } 592 }
592 dev = ip_dev_find(&init_net, mreq.imr_address.s_addr); 593 dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
593 if (dev) { 594 if (dev) {
594 mreq.imr_ifindex = dev->ifindex; 595 mreq.imr_ifindex = dev->ifindex;
595 dev_put(dev); 596 dev_put(dev);
596 } 597 }
597 } else 598 } else
598 dev = __dev_get_by_index(&init_net, mreq.imr_ifindex); 599 dev = __dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
599 600
600 601
601 err = -EADDRNOTAVAIL; 602 err = -EADDRNOTAVAIL;
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 58b60b2fb011..fb53ddfea5b5 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -179,7 +179,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
179 spi, IPPROTO_COMP, AF_INET); 179 spi, IPPROTO_COMP, AF_INET);
180 if (!x) 180 if (!x)
181 return; 181 return;
182 NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%u.%u.%u.%u\n", 182 NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/" NIPQUAD_FMT "\n",
183 spi, NIPQUAD(iph->daddr)); 183 spi, NIPQUAD(iph->daddr));
184 xfrm_state_put(x); 184 xfrm_state_put(x);
185} 185}
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 4824fe8996bf..0f42d1c1f690 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -292,7 +292,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
292 292
293 mm_segment_t oldfs = get_fs(); 293 mm_segment_t oldfs = get_fs();
294 set_fs(get_ds()); 294 set_fs(get_ds());
295 res = devinet_ioctl(cmd, (struct ifreq __user *) arg); 295 res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
296 set_fs(oldfs); 296 set_fs(oldfs);
297 return res; 297 return res;
298} 298}
@@ -376,7 +376,7 @@ static int __init ic_defaults(void)
376 */ 376 */
377 377
378 if (!ic_host_name_set) 378 if (!ic_host_name_set)
379 sprintf(init_utsname()->nodename, "%u.%u.%u.%u", NIPQUAD(ic_myaddr)); 379 sprintf(init_utsname()->nodename, NIPQUAD_FMT, NIPQUAD(ic_myaddr));
380 380
381 if (root_server_addr == NONE) 381 if (root_server_addr == NONE)
382 root_server_addr = ic_servaddr; 382 root_server_addr = ic_servaddr;
@@ -389,11 +389,11 @@ static int __init ic_defaults(void)
389 else if (IN_CLASSC(ntohl(ic_myaddr))) 389 else if (IN_CLASSC(ntohl(ic_myaddr)))
390 ic_netmask = htonl(IN_CLASSC_NET); 390 ic_netmask = htonl(IN_CLASSC_NET);
391 else { 391 else {
392 printk(KERN_ERR "IP-Config: Unable to guess netmask for address %u.%u.%u.%u\n", 392 printk(KERN_ERR "IP-Config: Unable to guess netmask for address " NIPQUAD_FMT "\n",
393 NIPQUAD(ic_myaddr)); 393 NIPQUAD(ic_myaddr));
394 return -1; 394 return -1;
395 } 395 }
396 printk("IP-Config: Guessing netmask %u.%u.%u.%u\n", NIPQUAD(ic_netmask)); 396 printk("IP-Config: Guessing netmask " NIPQUAD_FMT "\n", NIPQUAD(ic_netmask));
397 } 397 }
398 398
399 return 0; 399 return 0;
@@ -434,7 +434,7 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
434 unsigned char *sha, *tha; /* s for "source", t for "target" */ 434 unsigned char *sha, *tha; /* s for "source", t for "target" */
435 struct ic_device *d; 435 struct ic_device *d;
436 436
437 if (dev->nd_net != &init_net) 437 if (dev_net(dev) != &init_net)
438 goto drop; 438 goto drop;
439 439
440 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) 440 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
@@ -460,10 +460,7 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
460 if (rarp->ar_pro != htons(ETH_P_IP)) 460 if (rarp->ar_pro != htons(ETH_P_IP))
461 goto drop; 461 goto drop;
462 462
463 if (!pskb_may_pull(skb, 463 if (!pskb_may_pull(skb, arp_hdr_len(dev)))
464 sizeof(struct arphdr) +
465 (2 * dev->addr_len) +
466 (2 * 4)))
467 goto drop; 464 goto drop;
468 465
469 /* OK, it is all there and looks valid, process... */ 466 /* OK, it is all there and looks valid, process... */
@@ -857,7 +854,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
857 struct ic_device *d; 854 struct ic_device *d;
858 int len, ext_len; 855 int len, ext_len;
859 856
860 if (dev->nd_net != &init_net) 857 if (dev_net(dev) != &init_net)
861 goto drop; 858 goto drop;
862 859
863 /* Perform verifications before taking the lock. */ 860 /* Perform verifications before taking the lock. */
@@ -984,9 +981,9 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
984 ic_myaddr = b->your_ip; 981 ic_myaddr = b->your_ip;
985 ic_servaddr = server_id; 982 ic_servaddr = server_id;
986#ifdef IPCONFIG_DEBUG 983#ifdef IPCONFIG_DEBUG
987 printk("DHCP: Offered address %u.%u.%u.%u", 984 printk("DHCP: Offered address " NIPQUAD_FMT,
988 NIPQUAD(ic_myaddr)); 985 NIPQUAD(ic_myaddr));
989 printk(" by server %u.%u.%u.%u\n", 986 printk(" by server " NIPQUAD_FMT "\n",
990 NIPQUAD(ic_servaddr)); 987 NIPQUAD(ic_servaddr));
991#endif 988#endif
992 /* The DHCP indicated server address takes 989 /* The DHCP indicated server address takes
@@ -1182,11 +1179,11 @@ static int __init ic_dynamic(void)
1182 return -1; 1179 return -1;
1183 } 1180 }
1184 1181
1185 printk("IP-Config: Got %s answer from %u.%u.%u.%u, ", 1182 printk("IP-Config: Got %s answer from " NIPQUAD_FMT ", ",
1186 ((ic_got_reply & IC_RARP) ? "RARP" 1183 ((ic_got_reply & IC_RARP) ? "RARP"
1187 : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"), 1184 : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"),
1188 NIPQUAD(ic_servaddr)); 1185 NIPQUAD(ic_servaddr));
1189 printk("my address is %u.%u.%u.%u\n", NIPQUAD(ic_myaddr)); 1186 printk("my address is " NIPQUAD_FMT "\n", NIPQUAD(ic_myaddr));
1190 1187
1191 return 0; 1188 return 0;
1192} 1189}
@@ -1212,12 +1209,12 @@ static int pnp_seq_show(struct seq_file *seq, void *v)
1212 for (i = 0; i < CONF_NAMESERVERS_MAX; i++) { 1209 for (i = 0; i < CONF_NAMESERVERS_MAX; i++) {
1213 if (ic_nameservers[i] != NONE) 1210 if (ic_nameservers[i] != NONE)
1214 seq_printf(seq, 1211 seq_printf(seq,
1215 "nameserver %u.%u.%u.%u\n", 1212 "nameserver " NIPQUAD_FMT "\n",
1216 NIPQUAD(ic_nameservers[i])); 1213 NIPQUAD(ic_nameservers[i]));
1217 } 1214 }
1218 if (ic_servaddr != NONE) 1215 if (ic_servaddr != NONE)
1219 seq_printf(seq, 1216 seq_printf(seq,
1220 "bootserver %u.%u.%u.%u\n", 1217 "bootserver " NIPQUAD_FMT "\n",
1221 NIPQUAD(ic_servaddr)); 1218 NIPQUAD(ic_servaddr));
1222 return 0; 1219 return 0;
1223} 1220}
@@ -1392,13 +1389,13 @@ static int __init ip_auto_config(void)
1392 */ 1389 */
1393 printk("IP-Config: Complete:"); 1390 printk("IP-Config: Complete:");
1394 printk("\n device=%s", ic_dev->name); 1391 printk("\n device=%s", ic_dev->name);
1395 printk(", addr=%u.%u.%u.%u", NIPQUAD(ic_myaddr)); 1392 printk(", addr=" NIPQUAD_FMT, NIPQUAD(ic_myaddr));
1396 printk(", mask=%u.%u.%u.%u", NIPQUAD(ic_netmask)); 1393 printk(", mask=" NIPQUAD_FMT, NIPQUAD(ic_netmask));
1397 printk(", gw=%u.%u.%u.%u", NIPQUAD(ic_gateway)); 1394 printk(", gw=" NIPQUAD_FMT, NIPQUAD(ic_gateway));
1398 printk(",\n host=%s, domain=%s, nis-domain=%s", 1395 printk(",\n host=%s, domain=%s, nis-domain=%s",
1399 utsname()->nodename, ic_domain, utsname()->domainname); 1396 utsname()->nodename, ic_domain, utsname()->domainname);
1400 printk(",\n bootserver=%u.%u.%u.%u", NIPQUAD(ic_servaddr)); 1397 printk(",\n bootserver=" NIPQUAD_FMT, NIPQUAD(ic_servaddr));
1401 printk(", rootserver=%u.%u.%u.%u", NIPQUAD(root_server_addr)); 1398 printk(", rootserver=" NIPQUAD_FMT, NIPQUAD(root_server_addr));
1402 printk(", rootpath=%s", root_server_path); 1399 printk(", rootpath=%s", root_server_path);
1403 printk("\n"); 1400 printk("\n");
1404#endif /* !SILENT */ 1401#endif /* !SILENT */
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index dbaed69de06a..149111f08e8d 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -115,49 +115,57 @@
115#include <net/ipip.h> 115#include <net/ipip.h>
116#include <net/inet_ecn.h> 116#include <net/inet_ecn.h>
117#include <net/xfrm.h> 117#include <net/xfrm.h>
118#include <net/net_namespace.h>
119#include <net/netns/generic.h>
118 120
119#define HASH_SIZE 16 121#define HASH_SIZE 16
120#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) 122#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
121 123
124static int ipip_net_id;
125struct ipip_net {
126 struct ip_tunnel *tunnels_r_l[HASH_SIZE];
127 struct ip_tunnel *tunnels_r[HASH_SIZE];
128 struct ip_tunnel *tunnels_l[HASH_SIZE];
129 struct ip_tunnel *tunnels_wc[1];
130 struct ip_tunnel **tunnels[4];
131
132 struct net_device *fb_tunnel_dev;
133};
134
122static int ipip_fb_tunnel_init(struct net_device *dev); 135static int ipip_fb_tunnel_init(struct net_device *dev);
123static int ipip_tunnel_init(struct net_device *dev); 136static int ipip_tunnel_init(struct net_device *dev);
124static void ipip_tunnel_setup(struct net_device *dev); 137static void ipip_tunnel_setup(struct net_device *dev);
125 138
126static struct net_device *ipip_fb_tunnel_dev;
127
128static struct ip_tunnel *tunnels_r_l[HASH_SIZE];
129static struct ip_tunnel *tunnels_r[HASH_SIZE];
130static struct ip_tunnel *tunnels_l[HASH_SIZE];
131static struct ip_tunnel *tunnels_wc[1];
132static struct ip_tunnel **tunnels[4] = { tunnels_wc, tunnels_l, tunnels_r, tunnels_r_l };
133
134static DEFINE_RWLOCK(ipip_lock); 139static DEFINE_RWLOCK(ipip_lock);
135 140
136static struct ip_tunnel * ipip_tunnel_lookup(__be32 remote, __be32 local) 141static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
142 __be32 remote, __be32 local)
137{ 143{
138 unsigned h0 = HASH(remote); 144 unsigned h0 = HASH(remote);
139 unsigned h1 = HASH(local); 145 unsigned h1 = HASH(local);
140 struct ip_tunnel *t; 146 struct ip_tunnel *t;
147 struct ipip_net *ipn = net_generic(net, ipip_net_id);
141 148
142 for (t = tunnels_r_l[h0^h1]; t; t = t->next) { 149 for (t = ipn->tunnels_r_l[h0^h1]; t; t = t->next) {
143 if (local == t->parms.iph.saddr && 150 if (local == t->parms.iph.saddr &&
144 remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) 151 remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
145 return t; 152 return t;
146 } 153 }
147 for (t = tunnels_r[h0]; t; t = t->next) { 154 for (t = ipn->tunnels_r[h0]; t; t = t->next) {
148 if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) 155 if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
149 return t; 156 return t;
150 } 157 }
151 for (t = tunnels_l[h1]; t; t = t->next) { 158 for (t = ipn->tunnels_l[h1]; t; t = t->next) {
152 if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP)) 159 if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
153 return t; 160 return t;
154 } 161 }
155 if ((t = tunnels_wc[0]) != NULL && (t->dev->flags&IFF_UP)) 162 if ((t = ipn->tunnels_wc[0]) != NULL && (t->dev->flags&IFF_UP))
156 return t; 163 return t;
157 return NULL; 164 return NULL;
158} 165}
159 166
160static struct ip_tunnel **__ipip_bucket(struct ip_tunnel_parm *parms) 167static struct ip_tunnel **__ipip_bucket(struct ipip_net *ipn,
168 struct ip_tunnel_parm *parms)
161{ 169{
162 __be32 remote = parms->iph.daddr; 170 __be32 remote = parms->iph.daddr;
163 __be32 local = parms->iph.saddr; 171 __be32 local = parms->iph.saddr;
@@ -172,19 +180,20 @@ static struct ip_tunnel **__ipip_bucket(struct ip_tunnel_parm *parms)
172 prio |= 1; 180 prio |= 1;
173 h ^= HASH(local); 181 h ^= HASH(local);
174 } 182 }
175 return &tunnels[prio][h]; 183 return &ipn->tunnels[prio][h];
176} 184}
177 185
178static inline struct ip_tunnel **ipip_bucket(struct ip_tunnel *t) 186static inline struct ip_tunnel **ipip_bucket(struct ipip_net *ipn,
187 struct ip_tunnel *t)
179{ 188{
180 return __ipip_bucket(&t->parms); 189 return __ipip_bucket(ipn, &t->parms);
181} 190}
182 191
183static void ipip_tunnel_unlink(struct ip_tunnel *t) 192static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
184{ 193{
185 struct ip_tunnel **tp; 194 struct ip_tunnel **tp;
186 195
187 for (tp = ipip_bucket(t); *tp; tp = &(*tp)->next) { 196 for (tp = ipip_bucket(ipn, t); *tp; tp = &(*tp)->next) {
188 if (t == *tp) { 197 if (t == *tp) {
189 write_lock_bh(&ipip_lock); 198 write_lock_bh(&ipip_lock);
190 *tp = t->next; 199 *tp = t->next;
@@ -194,9 +203,9 @@ static void ipip_tunnel_unlink(struct ip_tunnel *t)
194 } 203 }
195} 204}
196 205
197static void ipip_tunnel_link(struct ip_tunnel *t) 206static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
198{ 207{
199 struct ip_tunnel **tp = ipip_bucket(t); 208 struct ip_tunnel **tp = ipip_bucket(ipn, t);
200 209
201 t->next = *tp; 210 t->next = *tp;
202 write_lock_bh(&ipip_lock); 211 write_lock_bh(&ipip_lock);
@@ -204,15 +213,17 @@ static void ipip_tunnel_link(struct ip_tunnel *t)
204 write_unlock_bh(&ipip_lock); 213 write_unlock_bh(&ipip_lock);
205} 214}
206 215
207static struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int create) 216static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
217 struct ip_tunnel_parm *parms, int create)
208{ 218{
209 __be32 remote = parms->iph.daddr; 219 __be32 remote = parms->iph.daddr;
210 __be32 local = parms->iph.saddr; 220 __be32 local = parms->iph.saddr;
211 struct ip_tunnel *t, **tp, *nt; 221 struct ip_tunnel *t, **tp, *nt;
212 struct net_device *dev; 222 struct net_device *dev;
213 char name[IFNAMSIZ]; 223 char name[IFNAMSIZ];
224 struct ipip_net *ipn = net_generic(net, ipip_net_id);
214 225
215 for (tp = __ipip_bucket(parms); (t = *tp) != NULL; tp = &t->next) { 226 for (tp = __ipip_bucket(ipn, parms); (t = *tp) != NULL; tp = &t->next) {
216 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) 227 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
217 return t; 228 return t;
218 } 229 }
@@ -228,6 +239,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int c
228 if (dev == NULL) 239 if (dev == NULL)
229 return NULL; 240 return NULL;
230 241
242 dev_net_set(dev, net);
243
231 if (strchr(name, '%')) { 244 if (strchr(name, '%')) {
232 if (dev_alloc_name(dev, name) < 0) 245 if (dev_alloc_name(dev, name) < 0)
233 goto failed_free; 246 goto failed_free;
@@ -241,7 +254,7 @@ static struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int c
241 goto failed_free; 254 goto failed_free;
242 255
243 dev_hold(dev); 256 dev_hold(dev);
244 ipip_tunnel_link(nt); 257 ipip_tunnel_link(ipn, nt);
245 return nt; 258 return nt;
246 259
247failed_free: 260failed_free:
@@ -251,12 +264,15 @@ failed_free:
251 264
252static void ipip_tunnel_uninit(struct net_device *dev) 265static void ipip_tunnel_uninit(struct net_device *dev)
253{ 266{
254 if (dev == ipip_fb_tunnel_dev) { 267 struct net *net = dev_net(dev);
268 struct ipip_net *ipn = net_generic(net, ipip_net_id);
269
270 if (dev == ipn->fb_tunnel_dev) {
255 write_lock_bh(&ipip_lock); 271 write_lock_bh(&ipip_lock);
256 tunnels_wc[0] = NULL; 272 ipn->tunnels_wc[0] = NULL;
257 write_unlock_bh(&ipip_lock); 273 write_unlock_bh(&ipip_lock);
258 } else 274 } else
259 ipip_tunnel_unlink(netdev_priv(dev)); 275 ipip_tunnel_unlink(ipn, netdev_priv(dev));
260 dev_put(dev); 276 dev_put(dev);
261} 277}
262 278
@@ -305,7 +321,7 @@ static int ipip_err(struct sk_buff *skb, u32 info)
305 err = -ENOENT; 321 err = -ENOENT;
306 322
307 read_lock(&ipip_lock); 323 read_lock(&ipip_lock);
308 t = ipip_tunnel_lookup(iph->daddr, iph->saddr); 324 t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
309 if (t == NULL || t->parms.iph.daddr == 0) 325 if (t == NULL || t->parms.iph.daddr == 0)
310 goto out; 326 goto out;
311 327
@@ -401,7 +417,7 @@ out:
401 fl.fl4_daddr = eiph->saddr; 417 fl.fl4_daddr = eiph->saddr;
402 fl.fl4_tos = RT_TOS(eiph->tos); 418 fl.fl4_tos = RT_TOS(eiph->tos);
403 fl.proto = IPPROTO_IPIP; 419 fl.proto = IPPROTO_IPIP;
404 if (ip_route_output_key(&init_net, &rt, &key)) { 420 if (ip_route_output_key(dev_net(skb->dev), &rt, &key)) {
405 kfree_skb(skb2); 421 kfree_skb(skb2);
406 return 0; 422 return 0;
407 } 423 }
@@ -414,7 +430,7 @@ out:
414 fl.fl4_daddr = eiph->daddr; 430 fl.fl4_daddr = eiph->daddr;
415 fl.fl4_src = eiph->saddr; 431 fl.fl4_src = eiph->saddr;
416 fl.fl4_tos = eiph->tos; 432 fl.fl4_tos = eiph->tos;
417 if (ip_route_output_key(&init_net, &rt, &fl) || 433 if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) ||
418 rt->u.dst.dev->type != ARPHRD_TUNNEL) { 434 rt->u.dst.dev->type != ARPHRD_TUNNEL) {
419 ip_rt_put(rt); 435 ip_rt_put(rt);
420 kfree_skb(skb2); 436 kfree_skb(skb2);
@@ -465,7 +481,8 @@ static int ipip_rcv(struct sk_buff *skb)
465 const struct iphdr *iph = ip_hdr(skb); 481 const struct iphdr *iph = ip_hdr(skb);
466 482
467 read_lock(&ipip_lock); 483 read_lock(&ipip_lock);
468 if ((tunnel = ipip_tunnel_lookup(iph->saddr, iph->daddr)) != NULL) { 484 if ((tunnel = ipip_tunnel_lookup(dev_net(skb->dev),
485 iph->saddr, iph->daddr)) != NULL) {
469 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { 486 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
470 read_unlock(&ipip_lock); 487 read_unlock(&ipip_lock);
471 kfree_skb(skb); 488 kfree_skb(skb);
@@ -528,7 +545,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
528 545
529 if (!dst) { 546 if (!dst) {
530 /* NBMA tunnel */ 547 /* NBMA tunnel */
531 if ((rt = (struct rtable*)skb->dst) == NULL) { 548 if ((rt = skb->rtable) == NULL) {
532 tunnel->stat.tx_fifo_errors++; 549 tunnel->stat.tx_fifo_errors++;
533 goto tx_error; 550 goto tx_error;
534 } 551 }
@@ -543,7 +560,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
543 .saddr = tiph->saddr, 560 .saddr = tiph->saddr,
544 .tos = RT_TOS(tos) } }, 561 .tos = RT_TOS(tos) } },
545 .proto = IPPROTO_IPIP }; 562 .proto = IPPROTO_IPIP };
546 if (ip_route_output_key(&init_net, &rt, &fl)) { 563 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
547 tunnel->stat.tx_carrier_errors++; 564 tunnel->stat.tx_carrier_errors++;
548 goto tx_error_icmp; 565 goto tx_error_icmp;
549 } 566 }
@@ -664,7 +681,7 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
664 .tos = RT_TOS(iph->tos) } }, 681 .tos = RT_TOS(iph->tos) } },
665 .proto = IPPROTO_IPIP }; 682 .proto = IPPROTO_IPIP };
666 struct rtable *rt; 683 struct rtable *rt;
667 if (!ip_route_output_key(&init_net, &rt, &fl)) { 684 if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
668 tdev = rt->u.dst.dev; 685 tdev = rt->u.dst.dev;
669 ip_rt_put(rt); 686 ip_rt_put(rt);
670 } 687 }
@@ -672,7 +689,7 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
672 } 689 }
673 690
674 if (!tdev && tunnel->parms.link) 691 if (!tdev && tunnel->parms.link)
675 tdev = __dev_get_by_index(&init_net, tunnel->parms.link); 692 tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
676 693
677 if (tdev) { 694 if (tdev) {
678 dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr); 695 dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
@@ -687,16 +704,18 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
687 int err = 0; 704 int err = 0;
688 struct ip_tunnel_parm p; 705 struct ip_tunnel_parm p;
689 struct ip_tunnel *t; 706 struct ip_tunnel *t;
707 struct net *net = dev_net(dev);
708 struct ipip_net *ipn = net_generic(net, ipip_net_id);
690 709
691 switch (cmd) { 710 switch (cmd) {
692 case SIOCGETTUNNEL: 711 case SIOCGETTUNNEL:
693 t = NULL; 712 t = NULL;
694 if (dev == ipip_fb_tunnel_dev) { 713 if (dev == ipn->fb_tunnel_dev) {
695 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) { 714 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
696 err = -EFAULT; 715 err = -EFAULT;
697 break; 716 break;
698 } 717 }
699 t = ipip_tunnel_locate(&p, 0); 718 t = ipip_tunnel_locate(net, &p, 0);
700 } 719 }
701 if (t == NULL) 720 if (t == NULL)
702 t = netdev_priv(dev); 721 t = netdev_priv(dev);
@@ -722,9 +741,9 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
722 if (p.iph.ttl) 741 if (p.iph.ttl)
723 p.iph.frag_off |= htons(IP_DF); 742 p.iph.frag_off |= htons(IP_DF);
724 743
725 t = ipip_tunnel_locate(&p, cmd == SIOCADDTUNNEL); 744 t = ipip_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
726 745
727 if (dev != ipip_fb_tunnel_dev && cmd == SIOCCHGTUNNEL) { 746 if (dev != ipn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
728 if (t != NULL) { 747 if (t != NULL) {
729 if (t->dev != dev) { 748 if (t->dev != dev) {
730 err = -EEXIST; 749 err = -EEXIST;
@@ -737,12 +756,12 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
737 break; 756 break;
738 } 757 }
739 t = netdev_priv(dev); 758 t = netdev_priv(dev);
740 ipip_tunnel_unlink(t); 759 ipip_tunnel_unlink(ipn, t);
741 t->parms.iph.saddr = p.iph.saddr; 760 t->parms.iph.saddr = p.iph.saddr;
742 t->parms.iph.daddr = p.iph.daddr; 761 t->parms.iph.daddr = p.iph.daddr;
743 memcpy(dev->dev_addr, &p.iph.saddr, 4); 762 memcpy(dev->dev_addr, &p.iph.saddr, 4);
744 memcpy(dev->broadcast, &p.iph.daddr, 4); 763 memcpy(dev->broadcast, &p.iph.daddr, 4);
745 ipip_tunnel_link(t); 764 ipip_tunnel_link(ipn, t);
746 netdev_state_change(dev); 765 netdev_state_change(dev);
747 } 766 }
748 } 767 }
@@ -770,15 +789,15 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
770 if (!capable(CAP_NET_ADMIN)) 789 if (!capable(CAP_NET_ADMIN))
771 goto done; 790 goto done;
772 791
773 if (dev == ipip_fb_tunnel_dev) { 792 if (dev == ipn->fb_tunnel_dev) {
774 err = -EFAULT; 793 err = -EFAULT;
775 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) 794 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
776 goto done; 795 goto done;
777 err = -ENOENT; 796 err = -ENOENT;
778 if ((t = ipip_tunnel_locate(&p, 0)) == NULL) 797 if ((t = ipip_tunnel_locate(net, &p, 0)) == NULL)
779 goto done; 798 goto done;
780 err = -EPERM; 799 err = -EPERM;
781 if (t->dev == ipip_fb_tunnel_dev) 800 if (t->dev == ipn->fb_tunnel_dev)
782 goto done; 801 goto done;
783 dev = t->dev; 802 dev = t->dev;
784 } 803 }
@@ -822,6 +841,7 @@ static void ipip_tunnel_setup(struct net_device *dev)
822 dev->flags = IFF_NOARP; 841 dev->flags = IFF_NOARP;
823 dev->iflink = 0; 842 dev->iflink = 0;
824 dev->addr_len = 4; 843 dev->addr_len = 4;
844 dev->features |= NETIF_F_NETNS_LOCAL;
825} 845}
826 846
827static int ipip_tunnel_init(struct net_device *dev) 847static int ipip_tunnel_init(struct net_device *dev)
@@ -841,10 +861,11 @@ static int ipip_tunnel_init(struct net_device *dev)
841 return 0; 861 return 0;
842} 862}
843 863
844static int __init ipip_fb_tunnel_init(struct net_device *dev) 864static int ipip_fb_tunnel_init(struct net_device *dev)
845{ 865{
846 struct ip_tunnel *tunnel = netdev_priv(dev); 866 struct ip_tunnel *tunnel = netdev_priv(dev);
847 struct iphdr *iph = &tunnel->parms.iph; 867 struct iphdr *iph = &tunnel->parms.iph;
868 struct ipip_net *ipn = net_generic(dev_net(dev), ipip_net_id);
848 869
849 tunnel->dev = dev; 870 tunnel->dev = dev;
850 strcpy(tunnel->parms.name, dev->name); 871 strcpy(tunnel->parms.name, dev->name);
@@ -854,7 +875,7 @@ static int __init ipip_fb_tunnel_init(struct net_device *dev)
854 iph->ihl = 5; 875 iph->ihl = 5;
855 876
856 dev_hold(dev); 877 dev_hold(dev);
857 tunnels_wc[0] = tunnel; 878 ipn->tunnels_wc[0] = tunnel;
858 return 0; 879 return 0;
859} 880}
860 881
@@ -867,50 +888,98 @@ static struct xfrm_tunnel ipip_handler = {
867static char banner[] __initdata = 888static char banner[] __initdata =
868 KERN_INFO "IPv4 over IPv4 tunneling driver\n"; 889 KERN_INFO "IPv4 over IPv4 tunneling driver\n";
869 890
870static int __init ipip_init(void) 891static void ipip_destroy_tunnels(struct ipip_net *ipn)
892{
893 int prio;
894
895 for (prio = 1; prio < 4; prio++) {
896 int h;
897 for (h = 0; h < HASH_SIZE; h++) {
898 struct ip_tunnel *t;
899 while ((t = ipn->tunnels[prio][h]) != NULL)
900 unregister_netdevice(t->dev);
901 }
902 }
903}
904
905static int ipip_init_net(struct net *net)
871{ 906{
872 int err; 907 int err;
908 struct ipip_net *ipn;
873 909
874 printk(banner); 910 err = -ENOMEM;
911 ipn = kzalloc(sizeof(struct ipip_net), GFP_KERNEL);
912 if (ipn == NULL)
913 goto err_alloc;
875 914
876 if (xfrm4_tunnel_register(&ipip_handler, AF_INET)) { 915 err = net_assign_generic(net, ipip_net_id, ipn);
877 printk(KERN_INFO "ipip init: can't register tunnel\n"); 916 if (err < 0)
878 return -EAGAIN; 917 goto err_assign;
879 }
880 918
881 ipip_fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), 919 ipn->tunnels[0] = ipn->tunnels_wc;
920 ipn->tunnels[1] = ipn->tunnels_l;
921 ipn->tunnels[2] = ipn->tunnels_r;
922 ipn->tunnels[3] = ipn->tunnels_r_l;
923
924 ipn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel),
882 "tunl0", 925 "tunl0",
883 ipip_tunnel_setup); 926 ipip_tunnel_setup);
884 if (!ipip_fb_tunnel_dev) { 927 if (!ipn->fb_tunnel_dev) {
885 err = -ENOMEM; 928 err = -ENOMEM;
886 goto err1; 929 goto err_alloc_dev;
887 } 930 }
888 931
889 ipip_fb_tunnel_dev->init = ipip_fb_tunnel_init; 932 ipn->fb_tunnel_dev->init = ipip_fb_tunnel_init;
933 dev_net_set(ipn->fb_tunnel_dev, net);
934
935 if ((err = register_netdev(ipn->fb_tunnel_dev)))
936 goto err_reg_dev;
937
938 return 0;
890 939
891 if ((err = register_netdev(ipip_fb_tunnel_dev))) 940err_reg_dev:
892 goto err2; 941 free_netdev(ipn->fb_tunnel_dev);
893 out: 942err_alloc_dev:
943 /* nothing */
944err_assign:
945 kfree(ipn);
946err_alloc:
894 return err; 947 return err;
895 err2:
896 free_netdev(ipip_fb_tunnel_dev);
897 err1:
898 xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
899 goto out;
900} 948}
901 949
902static void __exit ipip_destroy_tunnels(void) 950static void ipip_exit_net(struct net *net)
903{ 951{
904 int prio; 952 struct ipip_net *ipn;
905 953
906 for (prio = 1; prio < 4; prio++) { 954 ipn = net_generic(net, ipip_net_id);
907 int h; 955 rtnl_lock();
908 for (h = 0; h < HASH_SIZE; h++) { 956 ipip_destroy_tunnels(ipn);
909 struct ip_tunnel *t; 957 unregister_netdevice(ipn->fb_tunnel_dev);
910 while ((t = tunnels[prio][h]) != NULL) 958 rtnl_unlock();
911 unregister_netdevice(t->dev); 959 kfree(ipn);
912 } 960}
961
962static struct pernet_operations ipip_net_ops = {
963 .init = ipip_init_net,
964 .exit = ipip_exit_net,
965};
966
967static int __init ipip_init(void)
968{
969 int err;
970
971 printk(banner);
972
973 if (xfrm4_tunnel_register(&ipip_handler, AF_INET)) {
974 printk(KERN_INFO "ipip init: can't register tunnel\n");
975 return -EAGAIN;
913 } 976 }
977
978 err = register_pernet_gen_device(&ipip_net_id, &ipip_net_ops);
979 if (err)
980 xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
981
982 return err;
914} 983}
915 984
916static void __exit ipip_fini(void) 985static void __exit ipip_fini(void)
@@ -918,10 +987,7 @@ static void __exit ipip_fini(void)
918 if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET)) 987 if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET))
919 printk(KERN_INFO "ipip close: can't deregister tunnel\n"); 988 printk(KERN_INFO "ipip close: can't deregister tunnel\n");
920 989
921 rtnl_lock(); 990 unregister_pernet_gen_device(ipip_net_id, &ipip_net_ops);
922 ipip_destroy_tunnels();
923 unregister_netdevice(ipip_fb_tunnel_dev);
924 rtnl_unlock();
925} 991}
926 992
927module_init(ipip_init); 993module_init(ipip_init);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index a94f52c207a7..11700a4dcd95 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -849,7 +849,7 @@ static void mrtsock_destruct(struct sock *sk)
849{ 849{
850 rtnl_lock(); 850 rtnl_lock();
851 if (sk == mroute_socket) { 851 if (sk == mroute_socket) {
852 IPV4_DEVCONF_ALL(sk->sk_net, MC_FORWARDING)--; 852 IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--;
853 853
854 write_lock_bh(&mrt_lock); 854 write_lock_bh(&mrt_lock);
855 mroute_socket=NULL; 855 mroute_socket=NULL;
@@ -898,7 +898,7 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt
898 mroute_socket=sk; 898 mroute_socket=sk;
899 write_unlock_bh(&mrt_lock); 899 write_unlock_bh(&mrt_lock);
900 900
901 IPV4_DEVCONF_ALL(sk->sk_net, MC_FORWARDING)++; 901 IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++;
902 } 902 }
903 rtnl_unlock(); 903 rtnl_unlock();
904 return ret; 904 return ret;
@@ -1089,7 +1089,7 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
1089 struct vif_device *v; 1089 struct vif_device *v;
1090 int ct; 1090 int ct;
1091 1091
1092 if (dev->nd_net != &init_net) 1092 if (dev_net(dev) != &init_net)
1093 return NOTIFY_DONE; 1093 return NOTIFY_DONE;
1094 1094
1095 if (event != NETDEV_UNREGISTER) 1095 if (event != NETDEV_UNREGISTER)
@@ -1283,7 +1283,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
1283 if (vif_table[vif].dev != skb->dev) { 1283 if (vif_table[vif].dev != skb->dev) {
1284 int true_vifi; 1284 int true_vifi;
1285 1285
1286 if (((struct rtable*)skb->dst)->fl.iif == 0) { 1286 if (skb->rtable->fl.iif == 0) {
1287 /* It is our own packet, looped back. 1287 /* It is our own packet, looped back.
1288 Very complicated situation... 1288 Very complicated situation...
1289 1289
@@ -1357,7 +1357,7 @@ dont_forward:
1357int ip_mr_input(struct sk_buff *skb) 1357int ip_mr_input(struct sk_buff *skb)
1358{ 1358{
1359 struct mfc_cache *cache; 1359 struct mfc_cache *cache;
1360 int local = ((struct rtable*)skb->dst)->rt_flags&RTCF_LOCAL; 1360 int local = skb->rtable->rt_flags&RTCF_LOCAL;
1361 1361
1362 /* Packet is looped back after forward, it should not be 1362 /* Packet is looped back after forward, it should not be
1363 forwarded second time, but still can be delivered locally. 1363 forwarded second time, but still can be delivered locally.
@@ -1594,7 +1594,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1594{ 1594{
1595 int err; 1595 int err;
1596 struct mfc_cache *cache; 1596 struct mfc_cache *cache;
1597 struct rtable *rt = (struct rtable*)skb->dst; 1597 struct rtable *rt = skb->rtable;
1598 1598
1599 read_lock(&mrt_lock); 1599 read_lock(&mrt_lock);
1600 cache = ipmr_cache_find(rt->rt_src, rt->rt_dst); 1600 cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);
diff --git a/net/ipv4/ipvs/ip_vs_proto_tcp.c b/net/ipv4/ipvs/ip_vs_proto_tcp.c
index 12dc0d640b6d..620e40ff79a9 100644
--- a/net/ipv4/ipvs/ip_vs_proto_tcp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_tcp.c
@@ -550,7 +550,7 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
550 550
551 IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->" 551 IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->"
552 "%u.%u.%u.%u:%u to app %s on port %u\n", 552 "%u.%u.%u.%u:%u to app %s on port %u\n",
553 __FUNCTION__, 553 __func__,
554 NIPQUAD(cp->caddr), ntohs(cp->cport), 554 NIPQUAD(cp->caddr), ntohs(cp->cport),
555 NIPQUAD(cp->vaddr), ntohs(cp->vport), 555 NIPQUAD(cp->vaddr), ntohs(cp->vport),
556 inc->name, ntohs(inc->port)); 556 inc->name, ntohs(inc->port));
diff --git a/net/ipv4/ipvs/ip_vs_proto_udp.c b/net/ipv4/ipvs/ip_vs_proto_udp.c
index 1fa7b330b9ac..1caa2908373f 100644
--- a/net/ipv4/ipvs/ip_vs_proto_udp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_udp.c
@@ -344,7 +344,7 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp)
344 344
345 IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->" 345 IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->"
346 "%u.%u.%u.%u:%u to app %s on port %u\n", 346 "%u.%u.%u.%u:%u to app %s on port %u\n",
347 __FUNCTION__, 347 __func__,
348 NIPQUAD(cp->caddr), ntohs(cp->cport), 348 NIPQUAD(cp->caddr), ntohs(cp->cport),
349 NIPQUAD(cp->vaddr), ntohs(cp->vport), 349 NIPQUAD(cp->vaddr), ntohs(cp->vport),
350 inc->name, ntohs(inc->port)); 350 inc->name, ntohs(inc->port));
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 948378d0a755..69c56663cc9a 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -916,7 +916,7 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
916 if (!tinfo) 916 if (!tinfo)
917 return -ENOMEM; 917 return -ENOMEM;
918 918
919 IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, task_pid_nr(current)); 919 IP_VS_DBG(7, "%s: pid %d\n", __func__, task_pid_nr(current));
920 IP_VS_DBG(7, "Each ip_vs_sync_conn entry need %Zd bytes\n", 920 IP_VS_DBG(7, "Each ip_vs_sync_conn entry need %Zd bytes\n",
921 sizeof(struct ip_vs_sync_conn)); 921 sizeof(struct ip_vs_sync_conn));
922 922
@@ -956,7 +956,7 @@ int stop_sync_thread(int state)
956 (state == IP_VS_STATE_BACKUP && !sync_backup_pid)) 956 (state == IP_VS_STATE_BACKUP && !sync_backup_pid))
957 return -ESRCH; 957 return -ESRCH;
958 958
959 IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, task_pid_nr(current)); 959 IP_VS_DBG(7, "%s: pid %d\n", __func__, task_pid_nr(current));
960 IP_VS_INFO("stopping sync thread %d ...\n", 960 IP_VS_INFO("stopping sync thread %d ...\n",
961 (state == IP_VS_STATE_MASTER) ? 961 (state == IP_VS_STATE_MASTER) ?
962 sync_master_pid : sync_backup_pid); 962 sync_master_pid : sync_backup_pid);
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 9a904c6c0dc8..f8edacdf991d 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -182,21 +182,44 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
182 } 182 }
183 return csum; 183 return csum;
184} 184}
185
186EXPORT_SYMBOL(nf_ip_checksum); 185EXPORT_SYMBOL(nf_ip_checksum);
187 186
187static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
188 unsigned int dataoff, unsigned int len,
189 u_int8_t protocol)
190{
191 const struct iphdr *iph = ip_hdr(skb);
192 __sum16 csum = 0;
193
194 switch (skb->ip_summed) {
195 case CHECKSUM_COMPLETE:
196 if (len == skb->len - dataoff)
197 return nf_ip_checksum(skb, hook, dataoff, protocol);
198 /* fall through */
199 case CHECKSUM_NONE:
200 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol,
201 skb->len - dataoff, 0);
202 skb->ip_summed = CHECKSUM_NONE;
203 csum = __skb_checksum_complete_head(skb, dataoff + len);
204 if (!csum)
205 skb->ip_summed = CHECKSUM_UNNECESSARY;
206 }
207 return csum;
208}
209
188static int nf_ip_route(struct dst_entry **dst, struct flowi *fl) 210static int nf_ip_route(struct dst_entry **dst, struct flowi *fl)
189{ 211{
190 return ip_route_output_key(&init_net, (struct rtable **)dst, fl); 212 return ip_route_output_key(&init_net, (struct rtable **)dst, fl);
191} 213}
192 214
193static const struct nf_afinfo nf_ip_afinfo = { 215static const struct nf_afinfo nf_ip_afinfo = {
194 .family = AF_INET, 216 .family = AF_INET,
195 .checksum = nf_ip_checksum, 217 .checksum = nf_ip_checksum,
196 .route = nf_ip_route, 218 .checksum_partial = nf_ip_checksum_partial,
197 .saveroute = nf_ip_saveroute, 219 .route = nf_ip_route,
198 .reroute = nf_ip_reroute, 220 .saveroute = nf_ip_saveroute,
199 .route_key_size = sizeof(struct ip_rt_info), 221 .reroute = nf_ip_reroute,
222 .route_key_size = sizeof(struct ip_rt_info),
200}; 223};
201 224
202static int ipv4_netfilter_init(void) 225static int ipv4_netfilter_init(void)
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 9a077cb24798..0c95cd5872f3 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -241,10 +241,25 @@ config NF_NAT_SNMP_BASIC
241# <expr> '&&' <expr> (6) 241# <expr> '&&' <expr> (6)
242# 242#
243# (6) Returns the result of min(/expr/, /expr/). 243# (6) Returns the result of min(/expr/, /expr/).
244config NF_NAT_PROTO_DCCP
245 tristate
246 depends on NF_NAT && NF_CT_PROTO_DCCP
247 default NF_NAT && NF_CT_PROTO_DCCP
248
244config NF_NAT_PROTO_GRE 249config NF_NAT_PROTO_GRE
245 tristate 250 tristate
246 depends on NF_NAT && NF_CT_PROTO_GRE 251 depends on NF_NAT && NF_CT_PROTO_GRE
247 252
253config NF_NAT_PROTO_UDPLITE
254 tristate
255 depends on NF_NAT && NF_CT_PROTO_UDPLITE
256 default NF_NAT && NF_CT_PROTO_UDPLITE
257
258config NF_NAT_PROTO_SCTP
259 tristate
260 default NF_NAT && NF_CT_PROTO_SCTP
261 depends on NF_NAT && NF_CT_PROTO_SCTP
262
248config NF_NAT_FTP 263config NF_NAT_FTP
249 tristate 264 tristate
250 depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT 265 depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 0c7dc78a62e9..d9b92fbf5579 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -10,7 +10,7 @@ nf_conntrack_ipv4-objs += nf_conntrack_l3proto_ipv4_compat.o
10endif 10endif
11endif 11endif
12 12
13nf_nat-objs := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o 13nf_nat-objs := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_common.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o
14iptable_nat-objs := nf_nat_rule.o nf_nat_standalone.o 14iptable_nat-objs := nf_nat_rule.o nf_nat_standalone.o
15 15
16# connection tracking 16# connection tracking
@@ -29,7 +29,10 @@ obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
29obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o 29obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
30 30
31# NAT protocols (nf_nat) 31# NAT protocols (nf_nat)
32obj-$(CONFIG_NF_NAT_PROTO_DCCP) += nf_nat_proto_dccp.o
32obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o 33obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
34obj-$(CONFIG_NF_NAT_PROTO_UDPLITE) += nf_nat_proto_udplite.o
35obj-$(CONFIG_NF_NAT_PROTO_SCTP) += nf_nat_proto_sctp.o
33 36
34# generic IP tables 37# generic IP tables
35obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o 38obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index a7591ce344d2..03e83a65aec5 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -52,14 +52,14 @@ MODULE_DESCRIPTION("arptables core");
52do { \ 52do { \
53 if (!(x)) \ 53 if (!(x)) \
54 printk("ARP_NF_ASSERT: %s:%s:%u\n", \ 54 printk("ARP_NF_ASSERT: %s:%s:%u\n", \
55 __FUNCTION__, __FILE__, __LINE__); \ 55 __func__, __FILE__, __LINE__); \
56} while(0) 56} while(0)
57#else 57#else
58#define ARP_NF_ASSERT(x) 58#define ARP_NF_ASSERT(x)
59#endif 59#endif
60 60
61static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap, 61static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
62 char *hdr_addr, int len) 62 const char *hdr_addr, int len)
63{ 63{
64 int i, ret; 64 int i, ret;
65 65
@@ -80,8 +80,8 @@ static inline int arp_packet_match(const struct arphdr *arphdr,
80 const char *outdev, 80 const char *outdev,
81 const struct arpt_arp *arpinfo) 81 const struct arpt_arp *arpinfo)
82{ 82{
83 char *arpptr = (char *)(arphdr + 1); 83 const char *arpptr = (char *)(arphdr + 1);
84 char *src_devaddr, *tgt_devaddr; 84 const char *src_devaddr, *tgt_devaddr;
85 __be32 src_ipaddr, tgt_ipaddr; 85 __be32 src_ipaddr, tgt_ipaddr;
86 int i, ret; 86 int i, ret;
87 87
@@ -222,21 +222,18 @@ unsigned int arpt_do_table(struct sk_buff *skb,
222 unsigned int hook, 222 unsigned int hook,
223 const struct net_device *in, 223 const struct net_device *in,
224 const struct net_device *out, 224 const struct net_device *out,
225 struct arpt_table *table) 225 struct xt_table *table)
226{ 226{
227 static const char nulldevname[IFNAMSIZ]; 227 static const char nulldevname[IFNAMSIZ];
228 unsigned int verdict = NF_DROP; 228 unsigned int verdict = NF_DROP;
229 struct arphdr *arp; 229 const struct arphdr *arp;
230 bool hotdrop = false; 230 bool hotdrop = false;
231 struct arpt_entry *e, *back; 231 struct arpt_entry *e, *back;
232 const char *indev, *outdev; 232 const char *indev, *outdev;
233 void *table_base; 233 void *table_base;
234 struct xt_table_info *private; 234 const struct xt_table_info *private;
235 235
236 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ 236 if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
237 if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
238 (2 * skb->dev->addr_len) +
239 (2 * sizeof(u32)))))
240 return NF_DROP; 237 return NF_DROP;
241 238
242 indev = in ? in->name : nulldevname; 239 indev = in ? in->name : nulldevname;
@@ -355,7 +352,7 @@ static int mark_source_chains(struct xt_table_info *newinfo,
355 e->counters.pcnt = pos; 352 e->counters.pcnt = pos;
356 353
357 for (;;) { 354 for (;;) {
358 struct arpt_standard_target *t 355 const struct arpt_standard_target *t
359 = (void *)arpt_get_target(e); 356 = (void *)arpt_get_target(e);
360 int visited = e->comefrom & (1 << hook); 357 int visited = e->comefrom & (1 << hook);
361 358
@@ -440,7 +437,7 @@ static int mark_source_chains(struct xt_table_info *newinfo,
440 437
441static inline int check_entry(struct arpt_entry *e, const char *name) 438static inline int check_entry(struct arpt_entry *e, const char *name)
442{ 439{
443 struct arpt_entry_target *t; 440 const struct arpt_entry_target *t;
444 441
445 if (!arp_checkentry(&e->arp)) { 442 if (!arp_checkentry(&e->arp)) {
446 duprintf("arp_tables: arp check failed %p %s.\n", e, name); 443 duprintf("arp_tables: arp check failed %p %s.\n", e, name);
@@ -460,7 +457,7 @@ static inline int check_entry(struct arpt_entry *e, const char *name)
460static inline int check_target(struct arpt_entry *e, const char *name) 457static inline int check_target(struct arpt_entry *e, const char *name)
461{ 458{
462 struct arpt_entry_target *t; 459 struct arpt_entry_target *t;
463 struct arpt_target *target; 460 struct xt_target *target;
464 int ret; 461 int ret;
465 462
466 t = arpt_get_target(e); 463 t = arpt_get_target(e);
@@ -483,7 +480,7 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
483 unsigned int *i) 480 unsigned int *i)
484{ 481{
485 struct arpt_entry_target *t; 482 struct arpt_entry_target *t;
486 struct arpt_target *target; 483 struct xt_target *target;
487 int ret; 484 int ret;
488 485
489 ret = check_entry(e, name); 486 ret = check_entry(e, name);
@@ -709,11 +706,11 @@ static void get_counters(const struct xt_table_info *t,
709 } 706 }
710} 707}
711 708
712static inline struct xt_counters *alloc_counters(struct arpt_table *table) 709static inline struct xt_counters *alloc_counters(struct xt_table *table)
713{ 710{
714 unsigned int countersize; 711 unsigned int countersize;
715 struct xt_counters *counters; 712 struct xt_counters *counters;
716 struct xt_table_info *private = table->private; 713 const struct xt_table_info *private = table->private;
717 714
718 /* We need atomic snapshot of counters: rest doesn't change 715 /* We need atomic snapshot of counters: rest doesn't change
719 * (other than comefrom, which userspace doesn't care 716 * (other than comefrom, which userspace doesn't care
@@ -734,7 +731,7 @@ static inline struct xt_counters *alloc_counters(struct arpt_table *table)
734} 731}
735 732
736static int copy_entries_to_user(unsigned int total_size, 733static int copy_entries_to_user(unsigned int total_size,
737 struct arpt_table *table, 734 struct xt_table *table,
738 void __user *userptr) 735 void __user *userptr)
739{ 736{
740 unsigned int off, num; 737 unsigned int off, num;
@@ -854,7 +851,7 @@ static int compat_table_info(const struct xt_table_info *info,
854static int get_info(struct net *net, void __user *user, int *len, int compat) 851static int get_info(struct net *net, void __user *user, int *len, int compat)
855{ 852{
856 char name[ARPT_TABLE_MAXNAMELEN]; 853 char name[ARPT_TABLE_MAXNAMELEN];
857 struct arpt_table *t; 854 struct xt_table *t;
858 int ret; 855 int ret;
859 856
860 if (*len != sizeof(struct arpt_getinfo)) { 857 if (*len != sizeof(struct arpt_getinfo)) {
@@ -875,7 +872,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
875 "arptable_%s", name); 872 "arptable_%s", name);
876 if (t && !IS_ERR(t)) { 873 if (t && !IS_ERR(t)) {
877 struct arpt_getinfo info; 874 struct arpt_getinfo info;
878 struct xt_table_info *private = t->private; 875 const struct xt_table_info *private = t->private;
879 876
880#ifdef CONFIG_COMPAT 877#ifdef CONFIG_COMPAT
881 if (compat) { 878 if (compat) {
@@ -914,7 +911,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
914{ 911{
915 int ret; 912 int ret;
916 struct arpt_get_entries get; 913 struct arpt_get_entries get;
917 struct arpt_table *t; 914 struct xt_table *t;
918 915
919 if (*len < sizeof(get)) { 916 if (*len < sizeof(get)) {
920 duprintf("get_entries: %u < %Zu\n", *len, sizeof(get)); 917 duprintf("get_entries: %u < %Zu\n", *len, sizeof(get));
@@ -930,7 +927,8 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
930 927
931 t = xt_find_table_lock(net, NF_ARP, get.name); 928 t = xt_find_table_lock(net, NF_ARP, get.name);
932 if (t && !IS_ERR(t)) { 929 if (t && !IS_ERR(t)) {
933 struct xt_table_info *private = t->private; 930 const struct xt_table_info *private = t->private;
931
934 duprintf("t->private->number = %u\n", 932 duprintf("t->private->number = %u\n",
935 private->number); 933 private->number);
936 if (get.size == private->size) 934 if (get.size == private->size)
@@ -939,7 +937,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
939 else { 937 else {
940 duprintf("get_entries: I've got %u not %u!\n", 938 duprintf("get_entries: I've got %u not %u!\n",
941 private->size, get.size); 939 private->size, get.size);
942 ret = -EINVAL; 940 ret = -EAGAIN;
943 } 941 }
944 module_put(t->me); 942 module_put(t->me);
945 xt_table_unlock(t); 943 xt_table_unlock(t);
@@ -956,7 +954,7 @@ static int __do_replace(struct net *net, const char *name,
956 void __user *counters_ptr) 954 void __user *counters_ptr)
957{ 955{
958 int ret; 956 int ret;
959 struct arpt_table *t; 957 struct xt_table *t;
960 struct xt_table_info *oldinfo; 958 struct xt_table_info *oldinfo;
961 struct xt_counters *counters; 959 struct xt_counters *counters;
962 void *loc_cpu_old_entry; 960 void *loc_cpu_old_entry;
@@ -1090,11 +1088,11 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
1090 struct xt_counters_info tmp; 1088 struct xt_counters_info tmp;
1091 struct xt_counters *paddc; 1089 struct xt_counters *paddc;
1092 unsigned int num_counters; 1090 unsigned int num_counters;
1093 char *name; 1091 const char *name;
1094 int size; 1092 int size;
1095 void *ptmp; 1093 void *ptmp;
1096 struct arpt_table *t; 1094 struct xt_table *t;
1097 struct xt_table_info *private; 1095 const struct xt_table_info *private;
1098 int ret = 0; 1096 int ret = 0;
1099 void *loc_cpu_entry; 1097 void *loc_cpu_entry;
1100#ifdef CONFIG_COMPAT 1098#ifdef CONFIG_COMPAT
@@ -1499,11 +1497,11 @@ static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
1499 1497
1500 switch (cmd) { 1498 switch (cmd) {
1501 case ARPT_SO_SET_REPLACE: 1499 case ARPT_SO_SET_REPLACE:
1502 ret = compat_do_replace(sk->sk_net, user, len); 1500 ret = compat_do_replace(sock_net(sk), user, len);
1503 break; 1501 break;
1504 1502
1505 case ARPT_SO_SET_ADD_COUNTERS: 1503 case ARPT_SO_SET_ADD_COUNTERS:
1506 ret = do_add_counters(sk->sk_net, user, len, 1); 1504 ret = do_add_counters(sock_net(sk), user, len, 1);
1507 break; 1505 break;
1508 1506
1509 default: 1507 default:
@@ -1557,11 +1555,11 @@ out:
1557} 1555}
1558 1556
1559static int compat_copy_entries_to_user(unsigned int total_size, 1557static int compat_copy_entries_to_user(unsigned int total_size,
1560 struct arpt_table *table, 1558 struct xt_table *table,
1561 void __user *userptr) 1559 void __user *userptr)
1562{ 1560{
1563 struct xt_counters *counters; 1561 struct xt_counters *counters;
1564 struct xt_table_info *private = table->private; 1562 const struct xt_table_info *private = table->private;
1565 void __user *pos; 1563 void __user *pos;
1566 unsigned int size; 1564 unsigned int size;
1567 int ret = 0; 1565 int ret = 0;
@@ -1595,7 +1593,7 @@ static int compat_get_entries(struct net *net,
1595{ 1593{
1596 int ret; 1594 int ret;
1597 struct compat_arpt_get_entries get; 1595 struct compat_arpt_get_entries get;
1598 struct arpt_table *t; 1596 struct xt_table *t;
1599 1597
1600 if (*len < sizeof(get)) { 1598 if (*len < sizeof(get)) {
1601 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); 1599 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
@@ -1612,7 +1610,7 @@ static int compat_get_entries(struct net *net,
1612 xt_compat_lock(NF_ARP); 1610 xt_compat_lock(NF_ARP);
1613 t = xt_find_table_lock(net, NF_ARP, get.name); 1611 t = xt_find_table_lock(net, NF_ARP, get.name);
1614 if (t && !IS_ERR(t)) { 1612 if (t && !IS_ERR(t)) {
1615 struct xt_table_info *private = t->private; 1613 const struct xt_table_info *private = t->private;
1616 struct xt_table_info info; 1614 struct xt_table_info info;
1617 1615
1618 duprintf("t->private->number = %u\n", private->number); 1616 duprintf("t->private->number = %u\n", private->number);
@@ -1623,7 +1621,7 @@ static int compat_get_entries(struct net *net,
1623 } else if (!ret) { 1621 } else if (!ret) {
1624 duprintf("compat_get_entries: I've got %u not %u!\n", 1622 duprintf("compat_get_entries: I've got %u not %u!\n",
1625 private->size, get.size); 1623 private->size, get.size);
1626 ret = -EINVAL; 1624 ret = -EAGAIN;
1627 } 1625 }
1628 xt_compat_flush_offsets(NF_ARP); 1626 xt_compat_flush_offsets(NF_ARP);
1629 module_put(t->me); 1627 module_put(t->me);
@@ -1647,10 +1645,10 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
1647 1645
1648 switch (cmd) { 1646 switch (cmd) {
1649 case ARPT_SO_GET_INFO: 1647 case ARPT_SO_GET_INFO:
1650 ret = get_info(sk->sk_net, user, len, 1); 1648 ret = get_info(sock_net(sk), user, len, 1);
1651 break; 1649 break;
1652 case ARPT_SO_GET_ENTRIES: 1650 case ARPT_SO_GET_ENTRIES:
1653 ret = compat_get_entries(sk->sk_net, user, len); 1651 ret = compat_get_entries(sock_net(sk), user, len);
1654 break; 1652 break;
1655 default: 1653 default:
1656 ret = do_arpt_get_ctl(sk, cmd, user, len); 1654 ret = do_arpt_get_ctl(sk, cmd, user, len);
@@ -1668,11 +1666,11 @@ static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned
1668 1666
1669 switch (cmd) { 1667 switch (cmd) {
1670 case ARPT_SO_SET_REPLACE: 1668 case ARPT_SO_SET_REPLACE:
1671 ret = do_replace(sk->sk_net, user, len); 1669 ret = do_replace(sock_net(sk), user, len);
1672 break; 1670 break;
1673 1671
1674 case ARPT_SO_SET_ADD_COUNTERS: 1672 case ARPT_SO_SET_ADD_COUNTERS:
1675 ret = do_add_counters(sk->sk_net, user, len, 0); 1673 ret = do_add_counters(sock_net(sk), user, len, 0);
1676 break; 1674 break;
1677 1675
1678 default: 1676 default:
@@ -1692,11 +1690,11 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
1692 1690
1693 switch (cmd) { 1691 switch (cmd) {
1694 case ARPT_SO_GET_INFO: 1692 case ARPT_SO_GET_INFO:
1695 ret = get_info(sk->sk_net, user, len, 0); 1693 ret = get_info(sock_net(sk), user, len, 0);
1696 break; 1694 break;
1697 1695
1698 case ARPT_SO_GET_ENTRIES: 1696 case ARPT_SO_GET_ENTRIES:
1699 ret = get_entries(sk->sk_net, user, len); 1697 ret = get_entries(sock_net(sk), user, len);
1700 break; 1698 break;
1701 1699
1702 case ARPT_SO_GET_REVISION_TARGET: { 1700 case ARPT_SO_GET_REVISION_TARGET: {
@@ -1725,9 +1723,8 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
1725 return ret; 1723 return ret;
1726} 1724}
1727 1725
1728struct arpt_table *arpt_register_table(struct net *net, 1726struct xt_table *arpt_register_table(struct net *net, struct xt_table *table,
1729 struct arpt_table *table, 1727 const struct arpt_replace *repl)
1730 const struct arpt_replace *repl)
1731{ 1728{
1732 int ret; 1729 int ret;
1733 struct xt_table_info *newinfo; 1730 struct xt_table_info *newinfo;
@@ -1769,7 +1766,7 @@ out:
1769 return ERR_PTR(ret); 1766 return ERR_PTR(ret);
1770} 1767}
1771 1768
1772void arpt_unregister_table(struct arpt_table *table) 1769void arpt_unregister_table(struct xt_table *table)
1773{ 1770{
1774 struct xt_table_info *private; 1771 struct xt_table_info *private;
1775 void *loc_cpu_entry; 1772 void *loc_cpu_entry;
@@ -1787,7 +1784,7 @@ void arpt_unregister_table(struct arpt_table *table)
1787} 1784}
1788 1785
1789/* The built-in targets: standard (NULL) and error. */ 1786/* The built-in targets: standard (NULL) and error. */
1790static struct arpt_target arpt_standard_target __read_mostly = { 1787static struct xt_target arpt_standard_target __read_mostly = {
1791 .name = ARPT_STANDARD_TARGET, 1788 .name = ARPT_STANDARD_TARGET,
1792 .targetsize = sizeof(int), 1789 .targetsize = sizeof(int),
1793 .family = NF_ARP, 1790 .family = NF_ARP,
@@ -1798,7 +1795,7 @@ static struct arpt_target arpt_standard_target __read_mostly = {
1798#endif 1795#endif
1799}; 1796};
1800 1797
1801static struct arpt_target arpt_error_target __read_mostly = { 1798static struct xt_target arpt_error_target __read_mostly = {
1802 .name = ARPT_ERROR_TARGET, 1799 .name = ARPT_ERROR_TARGET,
1803 .target = arpt_error, 1800 .target = arpt_error,
1804 .targetsize = ARPT_FUNCTION_MAXNAMELEN, 1801 .targetsize = ARPT_FUNCTION_MAXNAMELEN,
diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c
index 3f4222b0a803..a385959d2655 100644
--- a/net/ipv4/netfilter/arpt_mangle.c
+++ b/net/ipv4/netfilter/arpt_mangle.c
@@ -15,7 +15,7 @@ target(struct sk_buff *skb,
15 const void *targinfo) 15 const void *targinfo)
16{ 16{
17 const struct arpt_mangle *mangle = targinfo; 17 const struct arpt_mangle *mangle = targinfo;
18 struct arphdr *arp; 18 const struct arphdr *arp;
19 unsigned char *arpptr; 19 unsigned char *arpptr;
20 int pln, hln; 20 int pln, hln;
21 21
@@ -73,8 +73,9 @@ checkentry(const char *tablename, const void *e, const struct xt_target *target,
73 return true; 73 return true;
74} 74}
75 75
76static struct arpt_target arpt_mangle_reg __read_mostly = { 76static struct xt_target arpt_mangle_reg __read_mostly = {
77 .name = "mangle", 77 .name = "mangle",
78 .family = NF_ARP,
78 .target = target, 79 .target = target,
79 .targetsize = sizeof(struct arpt_mangle), 80 .targetsize = sizeof(struct arpt_mangle),
80 .checkentry = checkentry, 81 .checkentry = checkentry,
@@ -83,15 +84,12 @@ static struct arpt_target arpt_mangle_reg __read_mostly = {
83 84
84static int __init arpt_mangle_init(void) 85static int __init arpt_mangle_init(void)
85{ 86{
86 if (arpt_register_target(&arpt_mangle_reg)) 87 return xt_register_target(&arpt_mangle_reg);
87 return -EINVAL;
88
89 return 0;
90} 88}
91 89
92static void __exit arpt_mangle_fini(void) 90static void __exit arpt_mangle_fini(void)
93{ 91{
94 arpt_unregister_target(&arpt_mangle_reg); 92 xt_unregister_target(&arpt_mangle_reg);
95} 93}
96 94
97module_init(arpt_mangle_init); 95module_init(arpt_mangle_init);
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index 4e9c496a30c2..3be4d07e7ed9 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -45,10 +45,10 @@ static struct
45 .term = ARPT_ERROR_INIT, 45 .term = ARPT_ERROR_INIT,
46}; 46};
47 47
48static struct arpt_table packet_filter = { 48static struct xt_table packet_filter = {
49 .name = "filter", 49 .name = "filter",
50 .valid_hooks = FILTER_VALID_HOOKS, 50 .valid_hooks = FILTER_VALID_HOOKS,
51 .lock = RW_LOCK_UNLOCKED, 51 .lock = __RW_LOCK_UNLOCKED(packet_filter.lock),
52 .private = NULL, 52 .private = NULL,
53 .me = THIS_MODULE, 53 .me = THIS_MODULE,
54 .af = NF_ARP, 54 .af = NF_ARP,
@@ -70,18 +70,21 @@ static struct nf_hook_ops arpt_ops[] __read_mostly = {
70 .owner = THIS_MODULE, 70 .owner = THIS_MODULE,
71 .pf = NF_ARP, 71 .pf = NF_ARP,
72 .hooknum = NF_ARP_IN, 72 .hooknum = NF_ARP_IN,
73 .priority = NF_IP_PRI_FILTER,
73 }, 74 },
74 { 75 {
75 .hook = arpt_hook, 76 .hook = arpt_hook,
76 .owner = THIS_MODULE, 77 .owner = THIS_MODULE,
77 .pf = NF_ARP, 78 .pf = NF_ARP,
78 .hooknum = NF_ARP_OUT, 79 .hooknum = NF_ARP_OUT,
80 .priority = NF_IP_PRI_FILTER,
79 }, 81 },
80 { 82 {
81 .hook = arpt_hook, 83 .hook = arpt_hook,
82 .owner = THIS_MODULE, 84 .owner = THIS_MODULE,
83 .pf = NF_ARP, 85 .pf = NF_ARP,
84 .hooknum = NF_ARP_FORWARD, 86 .hooknum = NF_ARP_FORWARD,
87 .priority = NF_IP_PRI_FILTER,
85 }, 88 },
86}; 89};
87 90
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 4dc162894cb2..719be29f7506 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -481,7 +481,7 @@ ipq_rcv_dev_event(struct notifier_block *this,
481{ 481{
482 struct net_device *dev = ptr; 482 struct net_device *dev = ptr;
483 483
484 if (dev->nd_net != &init_net) 484 if (dev_net(dev) != &init_net)
485 return NOTIFY_DONE; 485 return NOTIFY_DONE;
486 486
487 /* Drop any packets associated with the downed device */ 487 /* Drop any packets associated with the downed device */
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 600737f122d2..4e7c719445c2 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -53,7 +53,7 @@ MODULE_DESCRIPTION("IPv4 packet filter");
53do { \ 53do { \
54 if (!(x)) \ 54 if (!(x)) \
55 printk("IP_NF_ASSERT: %s:%s:%u\n", \ 55 printk("IP_NF_ASSERT: %s:%s:%u\n", \
56 __FUNCTION__, __FILE__, __LINE__); \ 56 __func__, __FILE__, __LINE__); \
57} while(0) 57} while(0)
58#else 58#else
59#define IP_NF_ASSERT(x) 59#define IP_NF_ASSERT(x)
@@ -296,7 +296,7 @@ static void trace_packet(struct sk_buff *skb,
296 struct ipt_entry *e) 296 struct ipt_entry *e)
297{ 297{
298 void *table_base; 298 void *table_base;
299 struct ipt_entry *root; 299 const struct ipt_entry *root;
300 char *hookname, *chainname, *comment; 300 char *hookname, *chainname, *comment;
301 unsigned int rulenum = 0; 301 unsigned int rulenum = 0;
302 302
@@ -327,7 +327,7 @@ ipt_do_table(struct sk_buff *skb,
327{ 327{
328 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); 328 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
329 u_int16_t offset; 329 u_int16_t offset;
330 struct iphdr *ip; 330 const struct iphdr *ip;
331 u_int16_t datalen; 331 u_int16_t datalen;
332 bool hotdrop = false; 332 bool hotdrop = false;
333 /* Initializing verdict to NF_DROP keeps gcc happy. */ 333 /* Initializing verdict to NF_DROP keeps gcc happy. */
@@ -926,7 +926,7 @@ static struct xt_counters * alloc_counters(struct xt_table *table)
926{ 926{
927 unsigned int countersize; 927 unsigned int countersize;
928 struct xt_counters *counters; 928 struct xt_counters *counters;
929 struct xt_table_info *private = table->private; 929 const struct xt_table_info *private = table->private;
930 930
931 /* We need atomic snapshot of counters: rest doesn't change 931 /* We need atomic snapshot of counters: rest doesn't change
932 (other than comefrom, which userspace doesn't care 932 (other than comefrom, which userspace doesn't care
@@ -953,9 +953,9 @@ copy_entries_to_user(unsigned int total_size,
953 unsigned int off, num; 953 unsigned int off, num;
954 struct ipt_entry *e; 954 struct ipt_entry *e;
955 struct xt_counters *counters; 955 struct xt_counters *counters;
956 struct xt_table_info *private = table->private; 956 const struct xt_table_info *private = table->private;
957 int ret = 0; 957 int ret = 0;
958 void *loc_cpu_entry; 958 const void *loc_cpu_entry;
959 959
960 counters = alloc_counters(table); 960 counters = alloc_counters(table);
961 if (IS_ERR(counters)) 961 if (IS_ERR(counters))
@@ -975,8 +975,8 @@ copy_entries_to_user(unsigned int total_size,
975 /* ... then go back and fix counters and names */ 975 /* ... then go back and fix counters and names */
976 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ 976 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
977 unsigned int i; 977 unsigned int i;
978 struct ipt_entry_match *m; 978 const struct ipt_entry_match *m;
979 struct ipt_entry_target *t; 979 const struct ipt_entry_target *t;
980 980
981 e = (struct ipt_entry *)(loc_cpu_entry + off); 981 e = (struct ipt_entry *)(loc_cpu_entry + off);
982 if (copy_to_user(userptr + off 982 if (copy_to_user(userptr + off
@@ -1116,7 +1116,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
1116 "iptable_%s", name); 1116 "iptable_%s", name);
1117 if (t && !IS_ERR(t)) { 1117 if (t && !IS_ERR(t)) {
1118 struct ipt_getinfo info; 1118 struct ipt_getinfo info;
1119 struct xt_table_info *private = t->private; 1119 const struct xt_table_info *private = t->private;
1120 1120
1121#ifdef CONFIG_COMPAT 1121#ifdef CONFIG_COMPAT
1122 if (compat) { 1122 if (compat) {
@@ -1172,7 +1172,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
1172 1172
1173 t = xt_find_table_lock(net, AF_INET, get.name); 1173 t = xt_find_table_lock(net, AF_INET, get.name);
1174 if (t && !IS_ERR(t)) { 1174 if (t && !IS_ERR(t)) {
1175 struct xt_table_info *private = t->private; 1175 const struct xt_table_info *private = t->private;
1176 duprintf("t->private->number = %u\n", private->number); 1176 duprintf("t->private->number = %u\n", private->number);
1177 if (get.size == private->size) 1177 if (get.size == private->size)
1178 ret = copy_entries_to_user(private->size, 1178 ret = copy_entries_to_user(private->size,
@@ -1180,7 +1180,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
1180 else { 1180 else {
1181 duprintf("get_entries: I've got %u not %u!\n", 1181 duprintf("get_entries: I've got %u not %u!\n",
1182 private->size, get.size); 1182 private->size, get.size);
1183 ret = -EINVAL; 1183 ret = -EAGAIN;
1184 } 1184 }
1185 module_put(t->me); 1185 module_put(t->me);
1186 xt_table_unlock(t); 1186 xt_table_unlock(t);
@@ -1337,11 +1337,11 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
1337 struct xt_counters_info tmp; 1337 struct xt_counters_info tmp;
1338 struct xt_counters *paddc; 1338 struct xt_counters *paddc;
1339 unsigned int num_counters; 1339 unsigned int num_counters;
1340 char *name; 1340 const char *name;
1341 int size; 1341 int size;
1342 void *ptmp; 1342 void *ptmp;
1343 struct xt_table *t; 1343 struct xt_table *t;
1344 struct xt_table_info *private; 1344 const struct xt_table_info *private;
1345 int ret = 0; 1345 int ret = 0;
1346 void *loc_cpu_entry; 1346 void *loc_cpu_entry;
1347#ifdef CONFIG_COMPAT 1347#ifdef CONFIG_COMPAT
@@ -1852,11 +1852,11 @@ compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1852 1852
1853 switch (cmd) { 1853 switch (cmd) {
1854 case IPT_SO_SET_REPLACE: 1854 case IPT_SO_SET_REPLACE:
1855 ret = compat_do_replace(sk->sk_net, user, len); 1855 ret = compat_do_replace(sock_net(sk), user, len);
1856 break; 1856 break;
1857 1857
1858 case IPT_SO_SET_ADD_COUNTERS: 1858 case IPT_SO_SET_ADD_COUNTERS:
1859 ret = do_add_counters(sk->sk_net, user, len, 1); 1859 ret = do_add_counters(sock_net(sk), user, len, 1);
1860 break; 1860 break;
1861 1861
1862 default: 1862 default:
@@ -1878,11 +1878,11 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1878 void __user *userptr) 1878 void __user *userptr)
1879{ 1879{
1880 struct xt_counters *counters; 1880 struct xt_counters *counters;
1881 struct xt_table_info *private = table->private; 1881 const struct xt_table_info *private = table->private;
1882 void __user *pos; 1882 void __user *pos;
1883 unsigned int size; 1883 unsigned int size;
1884 int ret = 0; 1884 int ret = 0;
1885 void *loc_cpu_entry; 1885 const void *loc_cpu_entry;
1886 unsigned int i = 0; 1886 unsigned int i = 0;
1887 1887
1888 counters = alloc_counters(table); 1888 counters = alloc_counters(table);
@@ -1929,7 +1929,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
1929 xt_compat_lock(AF_INET); 1929 xt_compat_lock(AF_INET);
1930 t = xt_find_table_lock(net, AF_INET, get.name); 1930 t = xt_find_table_lock(net, AF_INET, get.name);
1931 if (t && !IS_ERR(t)) { 1931 if (t && !IS_ERR(t)) {
1932 struct xt_table_info *private = t->private; 1932 const struct xt_table_info *private = t->private;
1933 struct xt_table_info info; 1933 struct xt_table_info info;
1934 duprintf("t->private->number = %u\n", private->number); 1934 duprintf("t->private->number = %u\n", private->number);
1935 ret = compat_table_info(private, &info); 1935 ret = compat_table_info(private, &info);
@@ -1939,7 +1939,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
1939 } else if (!ret) { 1939 } else if (!ret) {
1940 duprintf("compat_get_entries: I've got %u not %u!\n", 1940 duprintf("compat_get_entries: I've got %u not %u!\n",
1941 private->size, get.size); 1941 private->size, get.size);
1942 ret = -EINVAL; 1942 ret = -EAGAIN;
1943 } 1943 }
1944 xt_compat_flush_offsets(AF_INET); 1944 xt_compat_flush_offsets(AF_INET);
1945 module_put(t->me); 1945 module_put(t->me);
@@ -1963,10 +1963,10 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1963 1963
1964 switch (cmd) { 1964 switch (cmd) {
1965 case IPT_SO_GET_INFO: 1965 case IPT_SO_GET_INFO:
1966 ret = get_info(sk->sk_net, user, len, 1); 1966 ret = get_info(sock_net(sk), user, len, 1);
1967 break; 1967 break;
1968 case IPT_SO_GET_ENTRIES: 1968 case IPT_SO_GET_ENTRIES:
1969 ret = compat_get_entries(sk->sk_net, user, len); 1969 ret = compat_get_entries(sock_net(sk), user, len);
1970 break; 1970 break;
1971 default: 1971 default:
1972 ret = do_ipt_get_ctl(sk, cmd, user, len); 1972 ret = do_ipt_get_ctl(sk, cmd, user, len);
@@ -1985,11 +1985,11 @@ do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1985 1985
1986 switch (cmd) { 1986 switch (cmd) {
1987 case IPT_SO_SET_REPLACE: 1987 case IPT_SO_SET_REPLACE:
1988 ret = do_replace(sk->sk_net, user, len); 1988 ret = do_replace(sock_net(sk), user, len);
1989 break; 1989 break;
1990 1990
1991 case IPT_SO_SET_ADD_COUNTERS: 1991 case IPT_SO_SET_ADD_COUNTERS:
1992 ret = do_add_counters(sk->sk_net, user, len, 0); 1992 ret = do_add_counters(sock_net(sk), user, len, 0);
1993 break; 1993 break;
1994 1994
1995 default: 1995 default:
@@ -2010,11 +2010,11 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2010 2010
2011 switch (cmd) { 2011 switch (cmd) {
2012 case IPT_SO_GET_INFO: 2012 case IPT_SO_GET_INFO:
2013 ret = get_info(sk->sk_net, user, len, 0); 2013 ret = get_info(sock_net(sk), user, len, 0);
2014 break; 2014 break;
2015 2015
2016 case IPT_SO_GET_ENTRIES: 2016 case IPT_SO_GET_ENTRIES:
2017 ret = get_entries(sk->sk_net, user, len); 2017 ret = get_entries(sock_net(sk), user, len);
2018 break; 2018 break;
2019 2019
2020 case IPT_SO_GET_REVISION_MATCH: 2020 case IPT_SO_GET_REVISION_MATCH:
@@ -2130,7 +2130,8 @@ icmp_match(const struct sk_buff *skb,
2130 unsigned int protoff, 2130 unsigned int protoff,
2131 bool *hotdrop) 2131 bool *hotdrop)
2132{ 2132{
2133 struct icmphdr _icmph, *ic; 2133 const struct icmphdr *ic;
2134 struct icmphdr _icmph;
2134 const struct ipt_icmp *icmpinfo = matchinfo; 2135 const struct ipt_icmp *icmpinfo = matchinfo;
2135 2136
2136 /* Must not be a fragment. */ 2137 /* Must not be a fragment. */
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index a12dd329e208..22d8e7cd9197 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -144,7 +144,7 @@ clusterip_config_init_nodelist(struct clusterip_config *c,
144} 144}
145 145
146static struct clusterip_config * 146static struct clusterip_config *
147clusterip_config_init(struct ipt_clusterip_tgt_info *i, __be32 ip, 147clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
148 struct net_device *dev) 148 struct net_device *dev)
149{ 149{
150 struct clusterip_config *c; 150 struct clusterip_config *c;
@@ -333,7 +333,7 @@ clusterip_tg(struct sk_buff *skb, const struct net_device *in,
333 } 333 }
334 334
335#ifdef DEBUG 335#ifdef DEBUG
336 DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 336 nf_ct_dump_tuple_ip(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
337#endif 337#endif
338 pr_debug("hash=%u ct_hash=%u ", hash, ct->mark); 338 pr_debug("hash=%u ct_hash=%u ", hash, ct->mark);
339 if (!clusterip_responsible(cipinfo->config, hash)) { 339 if (!clusterip_responsible(cipinfo->config, hash)) {
@@ -418,7 +418,7 @@ clusterip_tg_check(const char *tablename, const void *e_void,
418/* drop reference count of cluster config when rule is deleted */ 418/* drop reference count of cluster config when rule is deleted */
419static void clusterip_tg_destroy(const struct xt_target *target, void *targinfo) 419static void clusterip_tg_destroy(const struct xt_target *target, void *targinfo)
420{ 420{
421 struct ipt_clusterip_tgt_info *cipinfo = targinfo; 421 const struct ipt_clusterip_tgt_info *cipinfo = targinfo;
422 422
423 /* if no more entries are referencing the config, remove it 423 /* if no more entries are referencing the config, remove it
424 * from the list and destroy the proc entry */ 424 * from the list and destroy the proc entry */
@@ -567,7 +567,7 @@ struct clusterip_seq_position {
567 567
568static void *clusterip_seq_start(struct seq_file *s, loff_t *pos) 568static void *clusterip_seq_start(struct seq_file *s, loff_t *pos)
569{ 569{
570 struct proc_dir_entry *pde = s->private; 570 const struct proc_dir_entry *pde = s->private;
571 struct clusterip_config *c = pde->data; 571 struct clusterip_config *c = pde->data;
572 unsigned int weight; 572 unsigned int weight;
573 u_int32_t local_nodes; 573 u_int32_t local_nodes;
@@ -594,7 +594,7 @@ static void *clusterip_seq_start(struct seq_file *s, loff_t *pos)
594 594
595static void *clusterip_seq_next(struct seq_file *s, void *v, loff_t *pos) 595static void *clusterip_seq_next(struct seq_file *s, void *v, loff_t *pos)
596{ 596{
597 struct clusterip_seq_position *idx = (struct clusterip_seq_position *)v; 597 struct clusterip_seq_position *idx = v;
598 598
599 *pos = ++idx->pos; 599 *pos = ++idx->pos;
600 if (*pos >= idx->weight) { 600 if (*pos >= idx->weight) {
@@ -613,7 +613,7 @@ static void clusterip_seq_stop(struct seq_file *s, void *v)
613 613
614static int clusterip_seq_show(struct seq_file *s, void *v) 614static int clusterip_seq_show(struct seq_file *s, void *v)
615{ 615{
616 struct clusterip_seq_position *idx = (struct clusterip_seq_position *)v; 616 struct clusterip_seq_position *idx = v;
617 617
618 if (idx->pos != 0) 618 if (idx->pos != 0)
619 seq_putc(s, ','); 619 seq_putc(s, ',');
@@ -669,7 +669,7 @@ static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
669{ 669{
670#define PROC_WRITELEN 10 670#define PROC_WRITELEN 10
671 char buffer[PROC_WRITELEN+1]; 671 char buffer[PROC_WRITELEN+1];
672 struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode); 672 const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
673 struct clusterip_config *c = pde->data; 673 struct clusterip_config *c = pde->data;
674 unsigned long nodenum; 674 unsigned long nodenum;
675 675
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index 21395bc2b27f..d60139c134ca 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -100,7 +100,7 @@ ecn_tg_check(const char *tablename, const void *e_void,
100 const struct xt_target *target, void *targinfo, 100 const struct xt_target *target, void *targinfo,
101 unsigned int hook_mask) 101 unsigned int hook_mask)
102{ 102{
103 const struct ipt_ECN_info *einfo = (struct ipt_ECN_info *)targinfo; 103 const struct ipt_ECN_info *einfo = targinfo;
104 const struct ipt_entry *e = e_void; 104 const struct ipt_entry *e = e_void;
105 105
106 if (einfo->operation & IPT_ECN_OP_MASK) { 106 if (einfo->operation & IPT_ECN_OP_MASK) {
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index b38d7850f506..0af14137137b 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -76,7 +76,8 @@ static void dump_packet(const struct nf_loginfo *info,
76 76
77 if ((logflags & IPT_LOG_IPOPT) 77 if ((logflags & IPT_LOG_IPOPT)
78 && ih->ihl * 4 > sizeof(struct iphdr)) { 78 && ih->ihl * 4 > sizeof(struct iphdr)) {
79 unsigned char _opt[4 * 15 - sizeof(struct iphdr)], *op; 79 const unsigned char *op;
80 unsigned char _opt[4 * 15 - sizeof(struct iphdr)];
80 unsigned int i, optsize; 81 unsigned int i, optsize;
81 82
82 optsize = ih->ihl * 4 - sizeof(struct iphdr); 83 optsize = ih->ihl * 4 - sizeof(struct iphdr);
@@ -338,12 +339,16 @@ static void dump_packet(const struct nf_loginfo *info,
338 if ((logflags & IPT_LOG_UID) && !iphoff && skb->sk) { 339 if ((logflags & IPT_LOG_UID) && !iphoff && skb->sk) {
339 read_lock_bh(&skb->sk->sk_callback_lock); 340 read_lock_bh(&skb->sk->sk_callback_lock);
340 if (skb->sk->sk_socket && skb->sk->sk_socket->file) 341 if (skb->sk->sk_socket && skb->sk->sk_socket->file)
341 printk("UID=%u GID=%u", 342 printk("UID=%u GID=%u ",
342 skb->sk->sk_socket->file->f_uid, 343 skb->sk->sk_socket->file->f_uid,
343 skb->sk->sk_socket->file->f_gid); 344 skb->sk->sk_socket->file->f_gid);
344 read_unlock_bh(&skb->sk->sk_callback_lock); 345 read_unlock_bh(&skb->sk->sk_callback_lock);
345 } 346 }
346 347
348 /* Max length: 16 "MARK=0xFFFFFFFF " */
349 if (!iphoff && skb->mark)
350 printk("MARK=0x%x ", skb->mark);
351
347 /* Proto Max log string length */ 352 /* Proto Max log string length */
348 /* IP: 40+46+6+11+127 = 230 */ 353 /* IP: 40+46+6+11+127 = 230 */
349 /* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */ 354 /* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index d80fee8327e4..84c26dd27d81 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -77,7 +77,7 @@ masquerade_tg(struct sk_buff *skb, const struct net_device *in,
77 return NF_ACCEPT; 77 return NF_ACCEPT;
78 78
79 mr = targinfo; 79 mr = targinfo;
80 rt = (struct rtable *)skb->dst; 80 rt = skb->rtable;
81 newsrc = inet_select_addr(out, rt->rt_gateway, RT_SCOPE_UNIVERSE); 81 newsrc = inet_select_addr(out, rt->rt_gateway, RT_SCOPE_UNIVERSE);
82 if (!newsrc) { 82 if (!newsrc) {
83 printk("MASQUERADE: %s ate my IP address\n", out->name); 83 printk("MASQUERADE: %s ate my IP address\n", out->name);
@@ -120,7 +120,7 @@ static int masq_device_event(struct notifier_block *this,
120{ 120{
121 const struct net_device *dev = ptr; 121 const struct net_device *dev = ptr;
122 122
123 if (dev->nd_net != &init_net) 123 if (dev_net(dev) != &init_net)
124 return NOTIFY_DONE; 124 return NOTIFY_DONE;
125 125
126 if (event == NETDEV_DOWN) { 126 if (event == NETDEV_DOWN) {
@@ -139,18 +139,8 @@ static int masq_inet_event(struct notifier_block *this,
139 unsigned long event, 139 unsigned long event,
140 void *ptr) 140 void *ptr)
141{ 141{
142 const struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev; 142 struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
143 143 return masq_device_event(this, event, dev);
144 if (event == NETDEV_DOWN) {
145 /* IP address was deleted. Search entire table for
146 conntracks which were associated with that device,
147 and forget them. */
148 NF_CT_ASSERT(dev->ifindex != 0);
149
150 nf_ct_iterate_cleanup(device_cmp, (void *)(long)dev->ifindex);
151 }
152
153 return NOTIFY_DONE;
154} 144}
155 145
156static struct notifier_block masq_dev_notifier = { 146static struct notifier_block masq_dev_notifier = {
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 22606e2baa16..2639872849da 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -35,8 +35,10 @@ MODULE_DESCRIPTION("Xtables: packet \"rejection\" target for IPv4");
35static void send_reset(struct sk_buff *oldskb, int hook) 35static void send_reset(struct sk_buff *oldskb, int hook)
36{ 36{
37 struct sk_buff *nskb; 37 struct sk_buff *nskb;
38 struct iphdr *oiph, *niph; 38 const struct iphdr *oiph;
39 struct tcphdr _otcph, *oth, *tcph; 39 struct iphdr *niph;
40 const struct tcphdr *oth;
41 struct tcphdr _otcph, *tcph;
40 unsigned int addr_type; 42 unsigned int addr_type;
41 43
42 /* IP header checks: fragment. */ 44 /* IP header checks: fragment. */
diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c
index 50e06690eb5b..21cb053f5d7d 100644
--- a/net/ipv4/netfilter/ipt_recent.c
+++ b/net/ipv4/netfilter/ipt_recent.c
@@ -340,7 +340,7 @@ static void *recent_seq_start(struct seq_file *seq, loff_t *pos)
340static void *recent_seq_next(struct seq_file *seq, void *v, loff_t *pos) 340static void *recent_seq_next(struct seq_file *seq, void *v, loff_t *pos)
341{ 341{
342 struct recent_iter_state *st = seq->private; 342 struct recent_iter_state *st = seq->private;
343 struct recent_table *t = st->table; 343 const struct recent_table *t = st->table;
344 struct recent_entry *e = v; 344 struct recent_entry *e = v;
345 struct list_head *head = e->list.next; 345 struct list_head *head = e->list.next;
346 346
@@ -361,7 +361,7 @@ static void recent_seq_stop(struct seq_file *s, void *v)
361 361
362static int recent_seq_show(struct seq_file *seq, void *v) 362static int recent_seq_show(struct seq_file *seq, void *v)
363{ 363{
364 struct recent_entry *e = v; 364 const struct recent_entry *e = v;
365 unsigned int i; 365 unsigned int i;
366 366
367 i = (e->index - 1) % ip_pkt_list_tot; 367 i = (e->index - 1) % ip_pkt_list_tot;
@@ -396,7 +396,7 @@ static int recent_seq_open(struct inode *inode, struct file *file)
396static ssize_t recent_proc_write(struct file *file, const char __user *input, 396static ssize_t recent_proc_write(struct file *file, const char __user *input,
397 size_t size, loff_t *loff) 397 size_t size, loff_t *loff)
398{ 398{
399 struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode); 399 const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
400 struct recent_table *t = pde->data; 400 struct recent_table *t = pde->data;
401 struct recent_entry *e; 401 struct recent_entry *e;
402 char buf[sizeof("+255.255.255.255")], *c = buf; 402 char buf[sizeof("+255.255.255.255")], *c = buf;
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 69f3d7e6e96f..1ea677dcf845 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -56,20 +56,32 @@ static struct
56static struct xt_table packet_filter = { 56static struct xt_table packet_filter = {
57 .name = "filter", 57 .name = "filter",
58 .valid_hooks = FILTER_VALID_HOOKS, 58 .valid_hooks = FILTER_VALID_HOOKS,
59 .lock = RW_LOCK_UNLOCKED, 59 .lock = __RW_LOCK_UNLOCKED(packet_filter.lock),
60 .me = THIS_MODULE, 60 .me = THIS_MODULE,
61 .af = AF_INET, 61 .af = AF_INET,
62}; 62};
63 63
64/* The work comes in here from netfilter.c. */ 64/* The work comes in here from netfilter.c. */
65static unsigned int 65static unsigned int
66ipt_local_in_hook(unsigned int hook,
67 struct sk_buff *skb,
68 const struct net_device *in,
69 const struct net_device *out,
70 int (*okfn)(struct sk_buff *))
71{
72 return ipt_do_table(skb, hook, in, out,
73 nf_local_in_net(in, out)->ipv4.iptable_filter);
74}
75
76static unsigned int
66ipt_hook(unsigned int hook, 77ipt_hook(unsigned int hook,
67 struct sk_buff *skb, 78 struct sk_buff *skb,
68 const struct net_device *in, 79 const struct net_device *in,
69 const struct net_device *out, 80 const struct net_device *out,
70 int (*okfn)(struct sk_buff *)) 81 int (*okfn)(struct sk_buff *))
71{ 82{
72 return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_filter); 83 return ipt_do_table(skb, hook, in, out,
84 nf_forward_net(in, out)->ipv4.iptable_filter);
73} 85}
74 86
75static unsigned int 87static unsigned int
@@ -88,12 +100,13 @@ ipt_local_out_hook(unsigned int hook,
88 return NF_ACCEPT; 100 return NF_ACCEPT;
89 } 101 }
90 102
91 return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_filter); 103 return ipt_do_table(skb, hook, in, out,
104 nf_local_out_net(in, out)->ipv4.iptable_filter);
92} 105}
93 106
94static struct nf_hook_ops ipt_ops[] __read_mostly = { 107static struct nf_hook_ops ipt_ops[] __read_mostly = {
95 { 108 {
96 .hook = ipt_hook, 109 .hook = ipt_local_in_hook,
97 .owner = THIS_MODULE, 110 .owner = THIS_MODULE,
98 .pf = PF_INET, 111 .pf = PF_INET,
99 .hooknum = NF_INET_LOCAL_IN, 112 .hooknum = NF_INET_LOCAL_IN,
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index c55a210853a7..da59182f2226 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -67,20 +67,54 @@ static struct
67static struct xt_table packet_mangler = { 67static struct xt_table packet_mangler = {
68 .name = "mangle", 68 .name = "mangle",
69 .valid_hooks = MANGLE_VALID_HOOKS, 69 .valid_hooks = MANGLE_VALID_HOOKS,
70 .lock = RW_LOCK_UNLOCKED, 70 .lock = __RW_LOCK_UNLOCKED(packet_mangler.lock),
71 .me = THIS_MODULE, 71 .me = THIS_MODULE,
72 .af = AF_INET, 72 .af = AF_INET,
73}; 73};
74 74
75/* The work comes in here from netfilter.c. */ 75/* The work comes in here from netfilter.c. */
76static unsigned int 76static unsigned int
77ipt_route_hook(unsigned int hook, 77ipt_pre_routing_hook(unsigned int hook,
78 struct sk_buff *skb,
79 const struct net_device *in,
80 const struct net_device *out,
81 int (*okfn)(struct sk_buff *))
82{
83 return ipt_do_table(skb, hook, in, out,
84 nf_pre_routing_net(in, out)->ipv4.iptable_mangle);
85}
86
87static unsigned int
88ipt_post_routing_hook(unsigned int hook,
89 struct sk_buff *skb,
90 const struct net_device *in,
91 const struct net_device *out,
92 int (*okfn)(struct sk_buff *))
93{
94 return ipt_do_table(skb, hook, in, out,
95 nf_post_routing_net(in, out)->ipv4.iptable_mangle);
96}
97
98static unsigned int
99ipt_local_in_hook(unsigned int hook,
100 struct sk_buff *skb,
101 const struct net_device *in,
102 const struct net_device *out,
103 int (*okfn)(struct sk_buff *))
104{
105 return ipt_do_table(skb, hook, in, out,
106 nf_local_in_net(in, out)->ipv4.iptable_mangle);
107}
108
109static unsigned int
110ipt_forward_hook(unsigned int hook,
78 struct sk_buff *skb, 111 struct sk_buff *skb,
79 const struct net_device *in, 112 const struct net_device *in,
80 const struct net_device *out, 113 const struct net_device *out,
81 int (*okfn)(struct sk_buff *)) 114 int (*okfn)(struct sk_buff *))
82{ 115{
83 return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_mangle); 116 return ipt_do_table(skb, hook, in, out,
117 nf_forward_net(in, out)->ipv4.iptable_mangle);
84} 118}
85 119
86static unsigned int 120static unsigned int
@@ -112,7 +146,8 @@ ipt_local_hook(unsigned int hook,
112 daddr = iph->daddr; 146 daddr = iph->daddr;
113 tos = iph->tos; 147 tos = iph->tos;
114 148
115 ret = ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_mangle); 149 ret = ipt_do_table(skb, hook, in, out,
150 nf_local_out_net(in, out)->ipv4.iptable_mangle);
116 /* Reroute for ANY change. */ 151 /* Reroute for ANY change. */
117 if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) { 152 if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) {
118 iph = ip_hdr(skb); 153 iph = ip_hdr(skb);
@@ -130,21 +165,21 @@ ipt_local_hook(unsigned int hook,
130 165
131static struct nf_hook_ops ipt_ops[] __read_mostly = { 166static struct nf_hook_ops ipt_ops[] __read_mostly = {
132 { 167 {
133 .hook = ipt_route_hook, 168 .hook = ipt_pre_routing_hook,
134 .owner = THIS_MODULE, 169 .owner = THIS_MODULE,
135 .pf = PF_INET, 170 .pf = PF_INET,
136 .hooknum = NF_INET_PRE_ROUTING, 171 .hooknum = NF_INET_PRE_ROUTING,
137 .priority = NF_IP_PRI_MANGLE, 172 .priority = NF_IP_PRI_MANGLE,
138 }, 173 },
139 { 174 {
140 .hook = ipt_route_hook, 175 .hook = ipt_local_in_hook,
141 .owner = THIS_MODULE, 176 .owner = THIS_MODULE,
142 .pf = PF_INET, 177 .pf = PF_INET,
143 .hooknum = NF_INET_LOCAL_IN, 178 .hooknum = NF_INET_LOCAL_IN,
144 .priority = NF_IP_PRI_MANGLE, 179 .priority = NF_IP_PRI_MANGLE,
145 }, 180 },
146 { 181 {
147 .hook = ipt_route_hook, 182 .hook = ipt_forward_hook,
148 .owner = THIS_MODULE, 183 .owner = THIS_MODULE,
149 .pf = PF_INET, 184 .pf = PF_INET,
150 .hooknum = NF_INET_FORWARD, 185 .hooknum = NF_INET_FORWARD,
@@ -158,7 +193,7 @@ static struct nf_hook_ops ipt_ops[] __read_mostly = {
158 .priority = NF_IP_PRI_MANGLE, 193 .priority = NF_IP_PRI_MANGLE,
159 }, 194 },
160 { 195 {
161 .hook = ipt_route_hook, 196 .hook = ipt_post_routing_hook,
162 .owner = THIS_MODULE, 197 .owner = THIS_MODULE,
163 .pf = PF_INET, 198 .pf = PF_INET,
164 .hooknum = NF_INET_POST_ROUTING, 199 .hooknum = NF_INET_POST_ROUTING,
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index e41fe8ca4e1c..fddce7754b72 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -39,7 +39,7 @@ static struct
39static struct xt_table packet_raw = { 39static struct xt_table packet_raw = {
40 .name = "raw", 40 .name = "raw",
41 .valid_hooks = RAW_VALID_HOOKS, 41 .valid_hooks = RAW_VALID_HOOKS,
42 .lock = RW_LOCK_UNLOCKED, 42 .lock = __RW_LOCK_UNLOCKED(packet_raw.lock),
43 .me = THIS_MODULE, 43 .me = THIS_MODULE,
44 .af = AF_INET, 44 .af = AF_INET,
45}; 45};
@@ -52,7 +52,8 @@ ipt_hook(unsigned int hook,
52 const struct net_device *out, 52 const struct net_device *out,
53 int (*okfn)(struct sk_buff *)) 53 int (*okfn)(struct sk_buff *))
54{ 54{
55 return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_raw); 55 return ipt_do_table(skb, hook, in, out,
56 nf_pre_routing_net(in, out)->ipv4.iptable_raw);
56} 57}
57 58
58static unsigned int 59static unsigned int
@@ -70,7 +71,8 @@ ipt_local_hook(unsigned int hook,
70 "packet.\n"); 71 "packet.\n");
71 return NF_ACCEPT; 72 return NF_ACCEPT;
72 } 73 }
73 return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_raw); 74 return ipt_do_table(skb, hook, in, out,
75 nf_local_out_net(in, out)->ipv4.iptable_raw);
74} 76}
75 77
76/* 'raw' is the very first table. */ 78/* 'raw' is the very first table. */
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index a65b845c5f15..cacb9cb27dab 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -23,30 +23,36 @@
23#include <net/netfilter/nf_conntrack_l3proto.h> 23#include <net/netfilter/nf_conntrack_l3proto.h>
24#include <net/netfilter/nf_conntrack_core.h> 24#include <net/netfilter/nf_conntrack_core.h>
25#include <net/netfilter/ipv4/nf_conntrack_ipv4.h> 25#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
26#include <net/netfilter/nf_nat_helper.h>
26 27
27static int ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, 28int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
28 struct nf_conntrack_tuple *tuple) 29 struct nf_conn *ct,
30 enum ip_conntrack_info ctinfo);
31EXPORT_SYMBOL_GPL(nf_nat_seq_adjust_hook);
32
33static bool ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
34 struct nf_conntrack_tuple *tuple)
29{ 35{
30 const __be32 *ap; 36 const __be32 *ap;
31 __be32 _addrs[2]; 37 __be32 _addrs[2];
32 ap = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr), 38 ap = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr),
33 sizeof(u_int32_t) * 2, _addrs); 39 sizeof(u_int32_t) * 2, _addrs);
34 if (ap == NULL) 40 if (ap == NULL)
35 return 0; 41 return false;
36 42
37 tuple->src.u3.ip = ap[0]; 43 tuple->src.u3.ip = ap[0];
38 tuple->dst.u3.ip = ap[1]; 44 tuple->dst.u3.ip = ap[1];
39 45
40 return 1; 46 return true;
41} 47}
42 48
43static int ipv4_invert_tuple(struct nf_conntrack_tuple *tuple, 49static bool ipv4_invert_tuple(struct nf_conntrack_tuple *tuple,
44 const struct nf_conntrack_tuple *orig) 50 const struct nf_conntrack_tuple *orig)
45{ 51{
46 tuple->src.u3.ip = orig->dst.u3.ip; 52 tuple->src.u3.ip = orig->dst.u3.ip;
47 tuple->dst.u3.ip = orig->src.u3.ip; 53 tuple->dst.u3.ip = orig->src.u3.ip;
48 54
49 return 1; 55 return true;
50} 56}
51 57
52static int ipv4_print_tuple(struct seq_file *s, 58static int ipv4_print_tuple(struct seq_file *s,
@@ -101,35 +107,41 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
101 const struct net_device *out, 107 const struct net_device *out,
102 int (*okfn)(struct sk_buff *)) 108 int (*okfn)(struct sk_buff *))
103{ 109{
104 /* We've seen it coming out the other side: confirm it */
105 return nf_conntrack_confirm(skb);
106}
107
108static unsigned int ipv4_conntrack_help(unsigned int hooknum,
109 struct sk_buff *skb,
110 const struct net_device *in,
111 const struct net_device *out,
112 int (*okfn)(struct sk_buff *))
113{
114 struct nf_conn *ct; 110 struct nf_conn *ct;
115 enum ip_conntrack_info ctinfo; 111 enum ip_conntrack_info ctinfo;
116 const struct nf_conn_help *help; 112 const struct nf_conn_help *help;
117 const struct nf_conntrack_helper *helper; 113 const struct nf_conntrack_helper *helper;
114 unsigned int ret;
118 115
119 /* This is where we call the helper: as the packet goes out. */ 116 /* This is where we call the helper: as the packet goes out. */
120 ct = nf_ct_get(skb, &ctinfo); 117 ct = nf_ct_get(skb, &ctinfo);
121 if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY) 118 if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)
122 return NF_ACCEPT; 119 goto out;
123 120
124 help = nfct_help(ct); 121 help = nfct_help(ct);
125 if (!help) 122 if (!help)
126 return NF_ACCEPT; 123 goto out;
124
127 /* rcu_read_lock()ed by nf_hook_slow */ 125 /* rcu_read_lock()ed by nf_hook_slow */
128 helper = rcu_dereference(help->helper); 126 helper = rcu_dereference(help->helper);
129 if (!helper) 127 if (!helper)
130 return NF_ACCEPT; 128 goto out;
131 return helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb), 129
132 ct, ctinfo); 130 ret = helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb),
131 ct, ctinfo);
132 if (ret != NF_ACCEPT)
133 return ret;
134
135 if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
136 typeof(nf_nat_seq_adjust_hook) seq_adjust;
137
138 seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
139 if (!seq_adjust || !seq_adjust(skb, ct, ctinfo))
140 return NF_DROP;
141 }
142out:
143 /* We've seen it coming out the other side: confirm it */
144 return nf_conntrack_confirm(skb);
133} 145}
134 146
135static unsigned int ipv4_conntrack_defrag(unsigned int hooknum, 147static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
@@ -211,20 +223,6 @@ static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = {
211 .priority = NF_IP_PRI_CONNTRACK, 223 .priority = NF_IP_PRI_CONNTRACK,
212 }, 224 },
213 { 225 {
214 .hook = ipv4_conntrack_help,
215 .owner = THIS_MODULE,
216 .pf = PF_INET,
217 .hooknum = NF_INET_POST_ROUTING,
218 .priority = NF_IP_PRI_CONNTRACK_HELPER,
219 },
220 {
221 .hook = ipv4_conntrack_help,
222 .owner = THIS_MODULE,
223 .pf = PF_INET,
224 .hooknum = NF_INET_LOCAL_IN,
225 .priority = NF_IP_PRI_CONNTRACK_HELPER,
226 },
227 {
228 .hook = ipv4_confirm, 226 .hook = ipv4_confirm,
229 .owner = THIS_MODULE, 227 .owner = THIS_MODULE,
230 .pf = PF_INET, 228 .pf = PF_INET,
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index f500b0fdaef4..40a46d482490 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -106,21 +106,16 @@ static int ct_seq_show(struct seq_file *s, void *v)
106 /* we only want to print DIR_ORIGINAL */ 106 /* we only want to print DIR_ORIGINAL */
107 if (NF_CT_DIRECTION(hash)) 107 if (NF_CT_DIRECTION(hash))
108 return 0; 108 return 0;
109 if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num != AF_INET) 109 if (nf_ct_l3num(ct) != AF_INET)
110 return 0; 110 return 0;
111 111
112 l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL] 112 l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
113 .tuple.src.l3num);
114 NF_CT_ASSERT(l3proto); 113 NF_CT_ASSERT(l3proto);
115 l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL] 114 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
116 .tuple.src.l3num,
117 ct->tuplehash[IP_CT_DIR_ORIGINAL]
118 .tuple.dst.protonum);
119 NF_CT_ASSERT(l4proto); 115 NF_CT_ASSERT(l4proto);
120 116
121 if (seq_printf(s, "%-8s %u %ld ", 117 if (seq_printf(s, "%-8s %u %ld ",
122 l4proto->name, 118 l4proto->name, nf_ct_protonum(ct),
123 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum,
124 timer_pending(&ct->timeout) 119 timer_pending(&ct->timeout)
125 ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0) 120 ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
126 return -ENOSPC; 121 return -ENOSPC;
@@ -379,7 +374,7 @@ static const struct file_operations ct_cpu_seq_fops = {
379 .open = ct_cpu_seq_open, 374 .open = ct_cpu_seq_open,
380 .read = seq_read, 375 .read = seq_read,
381 .llseek = seq_lseek, 376 .llseek = seq_lseek,
382 .release = seq_release_private, 377 .release = seq_release,
383}; 378};
384 379
385int __init nf_conntrack_ipv4_compat_init(void) 380int __init nf_conntrack_ipv4_compat_init(void)
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 6873fddb3529..78ab19accace 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -22,22 +22,21 @@
22 22
23static unsigned long nf_ct_icmp_timeout __read_mostly = 30*HZ; 23static unsigned long nf_ct_icmp_timeout __read_mostly = 30*HZ;
24 24
25static int icmp_pkt_to_tuple(const struct sk_buff *skb, 25static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
26 unsigned int dataoff, 26 struct nf_conntrack_tuple *tuple)
27 struct nf_conntrack_tuple *tuple)
28{ 27{
29 const struct icmphdr *hp; 28 const struct icmphdr *hp;
30 struct icmphdr _hdr; 29 struct icmphdr _hdr;
31 30
32 hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); 31 hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
33 if (hp == NULL) 32 if (hp == NULL)
34 return 0; 33 return false;
35 34
36 tuple->dst.u.icmp.type = hp->type; 35 tuple->dst.u.icmp.type = hp->type;
37 tuple->src.u.icmp.id = hp->un.echo.id; 36 tuple->src.u.icmp.id = hp->un.echo.id;
38 tuple->dst.u.icmp.code = hp->code; 37 tuple->dst.u.icmp.code = hp->code;
39 38
40 return 1; 39 return true;
41} 40}
42 41
43/* Add 1; spaces filled with 0. */ 42/* Add 1; spaces filled with 0. */
@@ -52,17 +51,17 @@ static const u_int8_t invmap[] = {
52 [ICMP_ADDRESSREPLY] = ICMP_ADDRESS + 1 51 [ICMP_ADDRESSREPLY] = ICMP_ADDRESS + 1
53}; 52};
54 53
55static int icmp_invert_tuple(struct nf_conntrack_tuple *tuple, 54static bool icmp_invert_tuple(struct nf_conntrack_tuple *tuple,
56 const struct nf_conntrack_tuple *orig) 55 const struct nf_conntrack_tuple *orig)
57{ 56{
58 if (orig->dst.u.icmp.type >= sizeof(invmap) 57 if (orig->dst.u.icmp.type >= sizeof(invmap)
59 || !invmap[orig->dst.u.icmp.type]) 58 || !invmap[orig->dst.u.icmp.type])
60 return 0; 59 return false;
61 60
62 tuple->src.u.icmp.id = orig->src.u.icmp.id; 61 tuple->src.u.icmp.id = orig->src.u.icmp.id;
63 tuple->dst.u.icmp.type = invmap[orig->dst.u.icmp.type] - 1; 62 tuple->dst.u.icmp.type = invmap[orig->dst.u.icmp.type] - 1;
64 tuple->dst.u.icmp.code = orig->dst.u.icmp.code; 63 tuple->dst.u.icmp.code = orig->dst.u.icmp.code;
65 return 1; 64 return true;
66} 65}
67 66
68/* Print out the per-protocol part of the tuple. */ 67/* Print out the per-protocol part of the tuple. */
@@ -101,8 +100,8 @@ static int icmp_packet(struct nf_conn *ct,
101} 100}
102 101
103/* Called when a new connection for this protocol found. */ 102/* Called when a new connection for this protocol found. */
104static int icmp_new(struct nf_conn *ct, 103static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
105 const struct sk_buff *skb, unsigned int dataoff) 104 unsigned int dataoff)
106{ 105{
107 static const u_int8_t valid_new[] = { 106 static const u_int8_t valid_new[] = {
108 [ICMP_ECHO] = 1, 107 [ICMP_ECHO] = 1,
@@ -116,11 +115,11 @@ static int icmp_new(struct nf_conn *ct,
116 /* Can't create a new ICMP `conn' with this. */ 115 /* Can't create a new ICMP `conn' with this. */
117 pr_debug("icmp: can't create new conn with type %u\n", 116 pr_debug("icmp: can't create new conn with type %u\n",
118 ct->tuplehash[0].tuple.dst.u.icmp.type); 117 ct->tuplehash[0].tuple.dst.u.icmp.type);
119 NF_CT_DUMP_TUPLE(&ct->tuplehash[0].tuple); 118 nf_ct_dump_tuple_ip(&ct->tuplehash[0].tuple);
120 return 0; 119 return false;
121 } 120 }
122 atomic_set(&ct->proto.icmp.count, 0); 121 atomic_set(&ct->proto.icmp.count, 0);
123 return 1; 122 return true;
124} 123}
125 124
126/* Returns conntrack if it dealt with ICMP, and filled in skb fields */ 125/* Returns conntrack if it dealt with ICMP, and filled in skb fields */
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 36b4e3bb056f..04578593e100 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -150,9 +150,9 @@ find_appropriate_src(const struct nf_conntrack_tuple *tuple,
150 const struct nf_nat_range *range) 150 const struct nf_nat_range *range)
151{ 151{
152 unsigned int h = hash_by_src(tuple); 152 unsigned int h = hash_by_src(tuple);
153 struct nf_conn_nat *nat; 153 const struct nf_conn_nat *nat;
154 struct nf_conn *ct; 154 const struct nf_conn *ct;
155 struct hlist_node *n; 155 const struct hlist_node *n;
156 156
157 rcu_read_lock(); 157 rcu_read_lock();
158 hlist_for_each_entry_rcu(nat, n, &bysource[h], bysource) { 158 hlist_for_each_entry_rcu(nat, n, &bysource[h], bysource) {
@@ -349,7 +349,7 @@ nf_nat_setup_info(struct nf_conn *ct,
349EXPORT_SYMBOL(nf_nat_setup_info); 349EXPORT_SYMBOL(nf_nat_setup_info);
350 350
351/* Returns true if succeeded. */ 351/* Returns true if succeeded. */
352static int 352static bool
353manip_pkt(u_int16_t proto, 353manip_pkt(u_int16_t proto,
354 struct sk_buff *skb, 354 struct sk_buff *skb,
355 unsigned int iphdroff, 355 unsigned int iphdroff,
@@ -360,7 +360,7 @@ manip_pkt(u_int16_t proto,
360 const struct nf_nat_protocol *p; 360 const struct nf_nat_protocol *p;
361 361
362 if (!skb_make_writable(skb, iphdroff + sizeof(*iph))) 362 if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
363 return 0; 363 return false;
364 364
365 iph = (void *)skb->data + iphdroff; 365 iph = (void *)skb->data + iphdroff;
366 366
@@ -369,7 +369,7 @@ manip_pkt(u_int16_t proto,
369 /* rcu_read_lock()ed by nf_hook_slow */ 369 /* rcu_read_lock()ed by nf_hook_slow */
370 p = __nf_nat_proto_find(proto); 370 p = __nf_nat_proto_find(proto);
371 if (!p->manip_pkt(skb, iphdroff, target, maniptype)) 371 if (!p->manip_pkt(skb, iphdroff, target, maniptype))
372 return 0; 372 return false;
373 373
374 iph = (void *)skb->data + iphdroff; 374 iph = (void *)skb->data + iphdroff;
375 375
@@ -380,7 +380,7 @@ manip_pkt(u_int16_t proto,
380 csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip); 380 csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
381 iph->daddr = target->dst.u3.ip; 381 iph->daddr = target->dst.u3.ip;
382 } 382 }
383 return 1; 383 return true;
384} 384}
385 385
386/* Do packet manipulations according to nf_nat_setup_info. */ 386/* Do packet manipulations according to nf_nat_setup_info. */
@@ -426,7 +426,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
426 struct icmphdr icmp; 426 struct icmphdr icmp;
427 struct iphdr ip; 427 struct iphdr ip;
428 } *inside; 428 } *inside;
429 struct nf_conntrack_l4proto *l4proto; 429 const struct nf_conntrack_l4proto *l4proto;
430 struct nf_conntrack_tuple inner, target; 430 struct nf_conntrack_tuple inner, target;
431 int hdrlen = ip_hdrlen(skb); 431 int hdrlen = ip_hdrlen(skb);
432 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 432 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
@@ -544,46 +544,6 @@ void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
544} 544}
545EXPORT_SYMBOL(nf_nat_protocol_unregister); 545EXPORT_SYMBOL(nf_nat_protocol_unregister);
546 546
547#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
548int
549nf_nat_port_range_to_nlattr(struct sk_buff *skb,
550 const struct nf_nat_range *range)
551{
552 NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MIN, range->min.tcp.port);
553 NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MAX, range->max.tcp.port);
554
555 return 0;
556
557nla_put_failure:
558 return -1;
559}
560EXPORT_SYMBOL_GPL(nf_nat_port_nlattr_to_range);
561
562int
563nf_nat_port_nlattr_to_range(struct nlattr *tb[], struct nf_nat_range *range)
564{
565 int ret = 0;
566
567 /* we have to return whether we actually parsed something or not */
568
569 if (tb[CTA_PROTONAT_PORT_MIN]) {
570 ret = 1;
571 range->min.tcp.port = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
572 }
573
574 if (!tb[CTA_PROTONAT_PORT_MAX]) {
575 if (ret)
576 range->max.tcp.port = range->min.tcp.port;
577 } else {
578 ret = 1;
579 range->max.tcp.port = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
580 }
581
582 return ret;
583}
584EXPORT_SYMBOL_GPL(nf_nat_port_range_to_nlattr);
585#endif
586
587/* Noone using conntrack by the time this called. */ 547/* Noone using conntrack by the time this called. */
588static void nf_nat_cleanup_conntrack(struct nf_conn *ct) 548static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
589{ 549{
@@ -660,6 +620,9 @@ static int __init nf_nat_init(void)
660 nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK; 620 nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;
661 621
662 l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET); 622 l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
623
624 BUG_ON(nf_nat_seq_adjust_hook != NULL);
625 rcu_assign_pointer(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
663 return 0; 626 return 0;
664 627
665 cleanup_extend: 628 cleanup_extend:
@@ -686,6 +649,8 @@ static void __exit nf_nat_cleanup(void)
686 nf_ct_free_hashtable(bysource, nf_nat_vmalloced, nf_nat_htable_size); 649 nf_ct_free_hashtable(bysource, nf_nat_vmalloced, nf_nat_htable_size);
687 nf_ct_l3proto_put(l3proto); 650 nf_ct_l3proto_put(l3proto);
688 nf_ct_extend_unregister(&nat_extend); 651 nf_ct_extend_unregister(&nat_extend);
652 rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL);
653 synchronize_net();
689} 654}
690 655
691MODULE_LICENSE("GPL"); 656MODULE_LICENSE("GPL");
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index ca57f47bbd25..11976ea29884 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -139,7 +139,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb,
139 const char *rep_buffer, 139 const char *rep_buffer,
140 unsigned int rep_len) 140 unsigned int rep_len)
141{ 141{
142 struct rtable *rt = (struct rtable *)skb->dst; 142 struct rtable *rt = skb->rtable;
143 struct iphdr *iph; 143 struct iphdr *iph;
144 struct tcphdr *tcph; 144 struct tcphdr *tcph;
145 int oldlen, datalen; 145 int oldlen, datalen;
@@ -217,7 +217,7 @@ nf_nat_mangle_udp_packet(struct sk_buff *skb,
217 const char *rep_buffer, 217 const char *rep_buffer,
218 unsigned int rep_len) 218 unsigned int rep_len)
219{ 219{
220 struct rtable *rt = (struct rtable *)skb->dst; 220 struct rtable *rt = skb->rtable;
221 struct iphdr *iph; 221 struct iphdr *iph;
222 struct udphdr *udph; 222 struct udphdr *udph;
223 int datalen, oldlen; 223 int datalen, oldlen;
@@ -416,7 +416,6 @@ nf_nat_seq_adjust(struct sk_buff *skb,
416 416
417 return 1; 417 return 1;
418} 418}
419EXPORT_SYMBOL(nf_nat_seq_adjust);
420 419
421/* Setup NAT on this expected conntrack so it follows master. */ 420/* Setup NAT on this expected conntrack so it follows master. */
422/* If we fail to get a free NAT slot, we'll get dropped on confirm */ 421/* If we fail to get a free NAT slot, we'll get dropped on confirm */
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 3a1e6d6afc0a..da3d91a5ef5c 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -72,7 +72,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
72 } 72 }
73 73
74 pr_debug("trying to unexpect other dir: "); 74 pr_debug("trying to unexpect other dir: ");
75 NF_CT_DUMP_TUPLE(&t); 75 nf_ct_dump_tuple_ip(&t);
76 other_exp = nf_ct_expect_find_get(&t); 76 other_exp = nf_ct_expect_find_get(&t);
77 if (other_exp) { 77 if (other_exp) {
78 nf_ct_unexpect_related(other_exp); 78 nf_ct_unexpect_related(other_exp);
diff --git a/net/ipv4/netfilter/nf_nat_proto_common.c b/net/ipv4/netfilter/nf_nat_proto_common.c
new file mode 100644
index 000000000000..91537f11273f
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_proto_common.c
@@ -0,0 +1,120 @@
1/* (C) 1999-2001 Paul `Rusty' Russell
2 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
3 * (C) 2008 Patrick McHardy <kaber@trash.net>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/types.h>
11#include <linux/random.h>
12#include <linux/ip.h>
13
14#include <linux/netfilter.h>
15#include <net/netfilter/nf_nat.h>
16#include <net/netfilter/nf_nat_core.h>
17#include <net/netfilter/nf_nat_rule.h>
18#include <net/netfilter/nf_nat_protocol.h>
19
20bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
21 enum nf_nat_manip_type maniptype,
22 const union nf_conntrack_man_proto *min,
23 const union nf_conntrack_man_proto *max)
24{
25 __be16 port;
26
27 if (maniptype == IP_NAT_MANIP_SRC)
28 port = tuple->src.u.all;
29 else
30 port = tuple->dst.u.all;
31
32 return ntohs(port) >= ntohs(min->all) &&
33 ntohs(port) <= ntohs(max->all);
34}
35EXPORT_SYMBOL_GPL(nf_nat_proto_in_range);
36
37bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
38 const struct nf_nat_range *range,
39 enum nf_nat_manip_type maniptype,
40 const struct nf_conn *ct,
41 u_int16_t *rover)
42{
43 unsigned int range_size, min, i;
44 __be16 *portptr;
45 u_int16_t off;
46
47 if (maniptype == IP_NAT_MANIP_SRC)
48 portptr = &tuple->src.u.all;
49 else
50 portptr = &tuple->dst.u.all;
51
52 /* If no range specified... */
53 if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
54 /* If it's dst rewrite, can't change port */
55 if (maniptype == IP_NAT_MANIP_DST)
56 return false;
57
58 if (ntohs(*portptr) < 1024) {
59 /* Loose convention: >> 512 is credential passing */
60 if (ntohs(*portptr) < 512) {
61 min = 1;
62 range_size = 511 - min + 1;
63 } else {
64 min = 600;
65 range_size = 1023 - min + 1;
66 }
67 } else {
68 min = 1024;
69 range_size = 65535 - 1024 + 1;
70 }
71 } else {
72 min = ntohs(range->min.all);
73 range_size = ntohs(range->max.all) - min + 1;
74 }
75
76 off = *rover;
77 if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
78 off = net_random();
79
80 for (i = 0; i < range_size; i++, off++) {
81 *portptr = htons(min + off % range_size);
82 if (nf_nat_used_tuple(tuple, ct))
83 continue;
84 if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
85 *rover = off;
86 return true;
87 }
88 return false;
89}
90EXPORT_SYMBOL_GPL(nf_nat_proto_unique_tuple);
91
92#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
93int nf_nat_proto_range_to_nlattr(struct sk_buff *skb,
94 const struct nf_nat_range *range)
95{
96 NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MIN, range->min.all);
97 NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MAX, range->max.all);
98 return 0;
99
100nla_put_failure:
101 return -1;
102}
103EXPORT_SYMBOL_GPL(nf_nat_proto_nlattr_to_range);
104
105int nf_nat_proto_nlattr_to_range(struct nlattr *tb[],
106 struct nf_nat_range *range)
107{
108 if (tb[CTA_PROTONAT_PORT_MIN]) {
109 range->min.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
110 range->max.all = range->min.tcp.port;
111 range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
112 }
113 if (tb[CTA_PROTONAT_PORT_MAX]) {
114 range->max.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
115 range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
116 }
117 return 0;
118}
119EXPORT_SYMBOL_GPL(nf_nat_proto_range_to_nlattr);
120#endif
diff --git a/net/ipv4/netfilter/nf_nat_proto_dccp.c b/net/ipv4/netfilter/nf_nat_proto_dccp.c
new file mode 100644
index 000000000000..22485ce306d4
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_proto_dccp.c
@@ -0,0 +1,108 @@
1/*
2 * DCCP NAT protocol helper
3 *
4 * Copyright (c) 2005, 2006. 2008 Patrick McHardy <kaber@trash.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/skbuff.h>
16#include <linux/ip.h>
17#include <linux/dccp.h>
18
19#include <net/netfilter/nf_conntrack.h>
20#include <net/netfilter/nf_nat.h>
21#include <net/netfilter/nf_nat_protocol.h>
22
23static u_int16_t dccp_port_rover;
24
25static bool
26dccp_unique_tuple(struct nf_conntrack_tuple *tuple,
27 const struct nf_nat_range *range,
28 enum nf_nat_manip_type maniptype,
29 const struct nf_conn *ct)
30{
31 return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
32 &dccp_port_rover);
33}
34
35static bool
36dccp_manip_pkt(struct sk_buff *skb,
37 unsigned int iphdroff,
38 const struct nf_conntrack_tuple *tuple,
39 enum nf_nat_manip_type maniptype)
40{
41 const struct iphdr *iph = (const void *)(skb->data + iphdroff);
42 struct dccp_hdr *hdr;
43 unsigned int hdroff = iphdroff + iph->ihl * 4;
44 __be32 oldip, newip;
45 __be16 *portptr, oldport, newport;
46 int hdrsize = 8; /* DCCP connection tracking guarantees this much */
47
48 if (skb->len >= hdroff + sizeof(struct dccp_hdr))
49 hdrsize = sizeof(struct dccp_hdr);
50
51 if (!skb_make_writable(skb, hdroff + hdrsize))
52 return false;
53
54 iph = (struct iphdr *)(skb->data + iphdroff);
55 hdr = (struct dccp_hdr *)(skb->data + hdroff);
56
57 if (maniptype == IP_NAT_MANIP_SRC) {
58 oldip = iph->saddr;
59 newip = tuple->src.u3.ip;
60 newport = tuple->src.u.dccp.port;
61 portptr = &hdr->dccph_sport;
62 } else {
63 oldip = iph->daddr;
64 newip = tuple->dst.u3.ip;
65 newport = tuple->dst.u.dccp.port;
66 portptr = &hdr->dccph_dport;
67 }
68
69 oldport = *portptr;
70 *portptr = newport;
71
72 if (hdrsize < sizeof(*hdr))
73 return true;
74
75 inet_proto_csum_replace4(&hdr->dccph_checksum, skb, oldip, newip, 1);
76 inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
77 0);
78 return true;
79}
80
81static const struct nf_nat_protocol nf_nat_protocol_dccp = {
82 .protonum = IPPROTO_DCCP,
83 .me = THIS_MODULE,
84 .manip_pkt = dccp_manip_pkt,
85 .in_range = nf_nat_proto_in_range,
86 .unique_tuple = dccp_unique_tuple,
87#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
88 .range_to_nlattr = nf_nat_proto_range_to_nlattr,
89 .nlattr_to_range = nf_nat_proto_nlattr_to_range,
90#endif
91};
92
93static int __init nf_nat_proto_dccp_init(void)
94{
95 return nf_nat_protocol_register(&nf_nat_protocol_dccp);
96}
97
98static void __exit nf_nat_proto_dccp_fini(void)
99{
100 nf_nat_protocol_unregister(&nf_nat_protocol_dccp);
101}
102
103module_init(nf_nat_proto_dccp_init);
104module_exit(nf_nat_proto_dccp_fini);
105
106MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
107MODULE_DESCRIPTION("DCCP NAT protocol helper");
108MODULE_LICENSE("GPL");
diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c
index a1e4da16da2e..d7e89201351e 100644
--- a/net/ipv4/netfilter/nf_nat_proto_gre.c
+++ b/net/ipv4/netfilter/nf_nat_proto_gre.c
@@ -36,26 +36,8 @@ MODULE_LICENSE("GPL");
36MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>"); 36MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
37MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE"); 37MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
38 38
39/* is key in given range between min and max */
40static int
41gre_in_range(const struct nf_conntrack_tuple *tuple,
42 enum nf_nat_manip_type maniptype,
43 const union nf_conntrack_man_proto *min,
44 const union nf_conntrack_man_proto *max)
45{
46 __be16 key;
47
48 if (maniptype == IP_NAT_MANIP_SRC)
49 key = tuple->src.u.gre.key;
50 else
51 key = tuple->dst.u.gre.key;
52
53 return ntohs(key) >= ntohs(min->gre.key) &&
54 ntohs(key) <= ntohs(max->gre.key);
55}
56
57/* generate unique tuple ... */ 39/* generate unique tuple ... */
58static int 40static bool
59gre_unique_tuple(struct nf_conntrack_tuple *tuple, 41gre_unique_tuple(struct nf_conntrack_tuple *tuple,
60 const struct nf_nat_range *range, 42 const struct nf_nat_range *range,
61 enum nf_nat_manip_type maniptype, 43 enum nf_nat_manip_type maniptype,
@@ -68,7 +50,7 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
68 /* If there is no master conntrack we are not PPTP, 50 /* If there is no master conntrack we are not PPTP,
69 do not change tuples */ 51 do not change tuples */
70 if (!ct->master) 52 if (!ct->master)
71 return 0; 53 return false;
72 54
73 if (maniptype == IP_NAT_MANIP_SRC) 55 if (maniptype == IP_NAT_MANIP_SRC)
74 keyptr = &tuple->src.u.gre.key; 56 keyptr = &tuple->src.u.gre.key;
@@ -89,20 +71,20 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
89 for (i = 0; i < range_size; i++, key++) { 71 for (i = 0; i < range_size; i++, key++) {
90 *keyptr = htons(min + key % range_size); 72 *keyptr = htons(min + key % range_size);
91 if (!nf_nat_used_tuple(tuple, ct)) 73 if (!nf_nat_used_tuple(tuple, ct))
92 return 1; 74 return true;
93 } 75 }
94 76
95 pr_debug("%p: no NAT mapping\n", ct); 77 pr_debug("%p: no NAT mapping\n", ct);
96 return 0; 78 return false;
97} 79}
98 80
99/* manipulate a GRE packet according to maniptype */ 81/* manipulate a GRE packet according to maniptype */
100static int 82static bool
101gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, 83gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
102 const struct nf_conntrack_tuple *tuple, 84 const struct nf_conntrack_tuple *tuple,
103 enum nf_nat_manip_type maniptype) 85 enum nf_nat_manip_type maniptype)
104{ 86{
105 struct gre_hdr *greh; 87 const struct gre_hdr *greh;
106 struct gre_hdr_pptp *pgreh; 88 struct gre_hdr_pptp *pgreh;
107 const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); 89 const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
108 unsigned int hdroff = iphdroff + iph->ihl * 4; 90 unsigned int hdroff = iphdroff + iph->ihl * 4;
@@ -110,7 +92,7 @@ gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
110 /* pgreh includes two optional 32bit fields which are not required 92 /* pgreh includes two optional 32bit fields which are not required
111 * to be there. That's where the magic '8' comes from */ 93 * to be there. That's where the magic '8' comes from */
112 if (!skb_make_writable(skb, hdroff + sizeof(*pgreh) - 8)) 94 if (!skb_make_writable(skb, hdroff + sizeof(*pgreh) - 8))
113 return 0; 95 return false;
114 96
115 greh = (void *)skb->data + hdroff; 97 greh = (void *)skb->data + hdroff;
116 pgreh = (struct gre_hdr_pptp *)greh; 98 pgreh = (struct gre_hdr_pptp *)greh;
@@ -118,7 +100,7 @@ gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
118 /* we only have destination manip of a packet, since 'source key' 100 /* we only have destination manip of a packet, since 'source key'
119 * is not present in the packet itself */ 101 * is not present in the packet itself */
120 if (maniptype != IP_NAT_MANIP_DST) 102 if (maniptype != IP_NAT_MANIP_DST)
121 return 1; 103 return true;
122 switch (greh->version) { 104 switch (greh->version) {
123 case GRE_VERSION_1701: 105 case GRE_VERSION_1701:
124 /* We do not currently NAT any GREv0 packets. 106 /* We do not currently NAT any GREv0 packets.
@@ -130,21 +112,20 @@ gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
130 break; 112 break;
131 default: 113 default:
132 pr_debug("can't nat unknown GRE version\n"); 114 pr_debug("can't nat unknown GRE version\n");
133 return 0; 115 return false;
134 } 116 }
135 return 1; 117 return true;
136} 118}
137 119
138static const struct nf_nat_protocol gre = { 120static const struct nf_nat_protocol gre = {
139 .name = "GRE",
140 .protonum = IPPROTO_GRE, 121 .protonum = IPPROTO_GRE,
141 .me = THIS_MODULE, 122 .me = THIS_MODULE,
142 .manip_pkt = gre_manip_pkt, 123 .manip_pkt = gre_manip_pkt,
143 .in_range = gre_in_range, 124 .in_range = nf_nat_proto_in_range,
144 .unique_tuple = gre_unique_tuple, 125 .unique_tuple = gre_unique_tuple,
145#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 126#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
146 .range_to_nlattr = nf_nat_port_range_to_nlattr, 127 .range_to_nlattr = nf_nat_proto_range_to_nlattr,
147 .nlattr_to_range = nf_nat_port_nlattr_to_range, 128 .nlattr_to_range = nf_nat_proto_nlattr_to_range,
148#endif 129#endif
149}; 130};
150 131
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c
index 03a02969aa57..19a8b0b07d8e 100644
--- a/net/ipv4/netfilter/nf_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c
@@ -17,7 +17,7 @@
17#include <net/netfilter/nf_nat_rule.h> 17#include <net/netfilter/nf_nat_rule.h>
18#include <net/netfilter/nf_nat_protocol.h> 18#include <net/netfilter/nf_nat_protocol.h>
19 19
20static int 20static bool
21icmp_in_range(const struct nf_conntrack_tuple *tuple, 21icmp_in_range(const struct nf_conntrack_tuple *tuple,
22 enum nf_nat_manip_type maniptype, 22 enum nf_nat_manip_type maniptype,
23 const union nf_conntrack_man_proto *min, 23 const union nf_conntrack_man_proto *min,
@@ -27,7 +27,7 @@ icmp_in_range(const struct nf_conntrack_tuple *tuple,
27 ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id); 27 ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
28} 28}
29 29
30static int 30static bool
31icmp_unique_tuple(struct nf_conntrack_tuple *tuple, 31icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
32 const struct nf_nat_range *range, 32 const struct nf_nat_range *range,
33 enum nf_nat_manip_type maniptype, 33 enum nf_nat_manip_type maniptype,
@@ -46,12 +46,12 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
46 tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) + 46 tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) +
47 (id % range_size)); 47 (id % range_size));
48 if (!nf_nat_used_tuple(tuple, ct)) 48 if (!nf_nat_used_tuple(tuple, ct))
49 return 1; 49 return true;
50 } 50 }
51 return 0; 51 return false;
52} 52}
53 53
54static int 54static bool
55icmp_manip_pkt(struct sk_buff *skb, 55icmp_manip_pkt(struct sk_buff *skb,
56 unsigned int iphdroff, 56 unsigned int iphdroff,
57 const struct nf_conntrack_tuple *tuple, 57 const struct nf_conntrack_tuple *tuple,
@@ -62,24 +62,23 @@ icmp_manip_pkt(struct sk_buff *skb,
62 unsigned int hdroff = iphdroff + iph->ihl*4; 62 unsigned int hdroff = iphdroff + iph->ihl*4;
63 63
64 if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) 64 if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
65 return 0; 65 return false;
66 66
67 hdr = (struct icmphdr *)(skb->data + hdroff); 67 hdr = (struct icmphdr *)(skb->data + hdroff);
68 inet_proto_csum_replace2(&hdr->checksum, skb, 68 inet_proto_csum_replace2(&hdr->checksum, skb,
69 hdr->un.echo.id, tuple->src.u.icmp.id, 0); 69 hdr->un.echo.id, tuple->src.u.icmp.id, 0);
70 hdr->un.echo.id = tuple->src.u.icmp.id; 70 hdr->un.echo.id = tuple->src.u.icmp.id;
71 return 1; 71 return true;
72} 72}
73 73
74const struct nf_nat_protocol nf_nat_protocol_icmp = { 74const struct nf_nat_protocol nf_nat_protocol_icmp = {
75 .name = "ICMP",
76 .protonum = IPPROTO_ICMP, 75 .protonum = IPPROTO_ICMP,
77 .me = THIS_MODULE, 76 .me = THIS_MODULE,
78 .manip_pkt = icmp_manip_pkt, 77 .manip_pkt = icmp_manip_pkt,
79 .in_range = icmp_in_range, 78 .in_range = icmp_in_range,
80 .unique_tuple = icmp_unique_tuple, 79 .unique_tuple = icmp_unique_tuple,
81#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 80#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
82 .range_to_nlattr = nf_nat_port_range_to_nlattr, 81 .range_to_nlattr = nf_nat_proto_range_to_nlattr,
83 .nlattr_to_range = nf_nat_port_nlattr_to_range, 82 .nlattr_to_range = nf_nat_proto_nlattr_to_range,
84#endif 83#endif
85}; 84};
diff --git a/net/ipv4/netfilter/nf_nat_proto_sctp.c b/net/ipv4/netfilter/nf_nat_proto_sctp.c
new file mode 100644
index 000000000000..82e4c0e286b8
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_proto_sctp.c
@@ -0,0 +1,96 @@
1/*
2 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/types.h>
10#include <linux/init.h>
11#include <linux/ip.h>
12#include <linux/sctp.h>
13#include <net/sctp/checksum.h>
14
15#include <net/netfilter/nf_nat_protocol.h>
16
17static u_int16_t nf_sctp_port_rover;
18
19static bool
20sctp_unique_tuple(struct nf_conntrack_tuple *tuple,
21 const struct nf_nat_range *range,
22 enum nf_nat_manip_type maniptype,
23 const struct nf_conn *ct)
24{
25 return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
26 &nf_sctp_port_rover);
27}
28
29static bool
30sctp_manip_pkt(struct sk_buff *skb,
31 unsigned int iphdroff,
32 const struct nf_conntrack_tuple *tuple,
33 enum nf_nat_manip_type maniptype)
34{
35 const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
36 sctp_sctphdr_t *hdr;
37 unsigned int hdroff = iphdroff + iph->ihl*4;
38 __be32 oldip, newip;
39 u32 crc32;
40
41 if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
42 return false;
43
44 iph = (struct iphdr *)(skb->data + iphdroff);
45 hdr = (struct sctphdr *)(skb->data + hdroff);
46
47 if (maniptype == IP_NAT_MANIP_SRC) {
48 /* Get rid of src ip and src pt */
49 oldip = iph->saddr;
50 newip = tuple->src.u3.ip;
51 hdr->source = tuple->src.u.sctp.port;
52 } else {
53 /* Get rid of dst ip and dst pt */
54 oldip = iph->daddr;
55 newip = tuple->dst.u3.ip;
56 hdr->dest = tuple->dst.u.sctp.port;
57 }
58
59 crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff);
60 for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next)
61 crc32 = sctp_update_cksum((u8 *)skb->data, skb_headlen(skb),
62 crc32);
63 crc32 = sctp_end_cksum(crc32);
64 hdr->checksum = htonl(crc32);
65
66 return true;
67}
68
69static const struct nf_nat_protocol nf_nat_protocol_sctp = {
70 .protonum = IPPROTO_SCTP,
71 .me = THIS_MODULE,
72 .manip_pkt = sctp_manip_pkt,
73 .in_range = nf_nat_proto_in_range,
74 .unique_tuple = sctp_unique_tuple,
75#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
76 .range_to_nlattr = nf_nat_proto_range_to_nlattr,
77 .nlattr_to_range = nf_nat_proto_nlattr_to_range,
78#endif
79};
80
81static int __init nf_nat_proto_sctp_init(void)
82{
83 return nf_nat_protocol_register(&nf_nat_protocol_sctp);
84}
85
86static void __exit nf_nat_proto_sctp_exit(void)
87{
88 nf_nat_protocol_unregister(&nf_nat_protocol_sctp);
89}
90
91module_init(nf_nat_proto_sctp_init);
92module_exit(nf_nat_proto_sctp_exit);
93
94MODULE_LICENSE("GPL");
95MODULE_DESCRIPTION("SCTP NAT protocol helper");
96MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/ipv4/netfilter/nf_nat_proto_tcp.c
index ffd5d1589eca..399e2cfa263b 100644
--- a/net/ipv4/netfilter/nf_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c
@@ -8,7 +8,6 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/random.h>
12#include <linux/ip.h> 11#include <linux/ip.h>
13#include <linux/tcp.h> 12#include <linux/tcp.h>
14 13
@@ -19,75 +18,19 @@
19#include <net/netfilter/nf_nat_protocol.h> 18#include <net/netfilter/nf_nat_protocol.h>
20#include <net/netfilter/nf_nat_core.h> 19#include <net/netfilter/nf_nat_core.h>
21 20
22static int 21static u_int16_t tcp_port_rover;
23tcp_in_range(const struct nf_conntrack_tuple *tuple,
24 enum nf_nat_manip_type maniptype,
25 const union nf_conntrack_man_proto *min,
26 const union nf_conntrack_man_proto *max)
27{
28 __be16 port;
29
30 if (maniptype == IP_NAT_MANIP_SRC)
31 port = tuple->src.u.tcp.port;
32 else
33 port = tuple->dst.u.tcp.port;
34
35 return ntohs(port) >= ntohs(min->tcp.port) &&
36 ntohs(port) <= ntohs(max->tcp.port);
37}
38 22
39static int 23static bool
40tcp_unique_tuple(struct nf_conntrack_tuple *tuple, 24tcp_unique_tuple(struct nf_conntrack_tuple *tuple,
41 const struct nf_nat_range *range, 25 const struct nf_nat_range *range,
42 enum nf_nat_manip_type maniptype, 26 enum nf_nat_manip_type maniptype,
43 const struct nf_conn *ct) 27 const struct nf_conn *ct)
44{ 28{
45 static u_int16_t port; 29 return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
46 __be16 *portptr; 30 &tcp_port_rover);
47 unsigned int range_size, min, i;
48
49 if (maniptype == IP_NAT_MANIP_SRC)
50 portptr = &tuple->src.u.tcp.port;
51 else
52 portptr = &tuple->dst.u.tcp.port;
53
54 /* If no range specified... */
55 if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
56 /* If it's dst rewrite, can't change port */
57 if (maniptype == IP_NAT_MANIP_DST)
58 return 0;
59
60 /* Map privileged onto privileged. */
61 if (ntohs(*portptr) < 1024) {
62 /* Loose convention: >> 512 is credential passing */
63 if (ntohs(*portptr)<512) {
64 min = 1;
65 range_size = 511 - min + 1;
66 } else {
67 min = 600;
68 range_size = 1023 - min + 1;
69 }
70 } else {
71 min = 1024;
72 range_size = 65535 - 1024 + 1;
73 }
74 } else {
75 min = ntohs(range->min.tcp.port);
76 range_size = ntohs(range->max.tcp.port) - min + 1;
77 }
78
79 if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
80 port = net_random();
81
82 for (i = 0; i < range_size; i++, port++) {
83 *portptr = htons(min + port % range_size);
84 if (!nf_nat_used_tuple(tuple, ct))
85 return 1;
86 }
87 return 0;
88} 31}
89 32
90static int 33static bool
91tcp_manip_pkt(struct sk_buff *skb, 34tcp_manip_pkt(struct sk_buff *skb,
92 unsigned int iphdroff, 35 unsigned int iphdroff,
93 const struct nf_conntrack_tuple *tuple, 36 const struct nf_conntrack_tuple *tuple,
@@ -107,7 +50,7 @@ tcp_manip_pkt(struct sk_buff *skb,
107 hdrsize = sizeof(struct tcphdr); 50 hdrsize = sizeof(struct tcphdr);
108 51
109 if (!skb_make_writable(skb, hdroff + hdrsize)) 52 if (!skb_make_writable(skb, hdroff + hdrsize))
110 return 0; 53 return false;
111 54
112 iph = (struct iphdr *)(skb->data + iphdroff); 55 iph = (struct iphdr *)(skb->data + iphdroff);
113 hdr = (struct tcphdr *)(skb->data + hdroff); 56 hdr = (struct tcphdr *)(skb->data + hdroff);
@@ -130,22 +73,21 @@ tcp_manip_pkt(struct sk_buff *skb,
130 *portptr = newport; 73 *portptr = newport;
131 74
132 if (hdrsize < sizeof(*hdr)) 75 if (hdrsize < sizeof(*hdr))
133 return 1; 76 return true;
134 77
135 inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1); 78 inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1);
136 inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0); 79 inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0);
137 return 1; 80 return true;
138} 81}
139 82
140const struct nf_nat_protocol nf_nat_protocol_tcp = { 83const struct nf_nat_protocol nf_nat_protocol_tcp = {
141 .name = "TCP",
142 .protonum = IPPROTO_TCP, 84 .protonum = IPPROTO_TCP,
143 .me = THIS_MODULE, 85 .me = THIS_MODULE,
144 .manip_pkt = tcp_manip_pkt, 86 .manip_pkt = tcp_manip_pkt,
145 .in_range = tcp_in_range, 87 .in_range = nf_nat_proto_in_range,
146 .unique_tuple = tcp_unique_tuple, 88 .unique_tuple = tcp_unique_tuple,
147#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 89#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
148 .range_to_nlattr = nf_nat_port_range_to_nlattr, 90 .range_to_nlattr = nf_nat_proto_range_to_nlattr,
149 .nlattr_to_range = nf_nat_port_nlattr_to_range, 91 .nlattr_to_range = nf_nat_proto_nlattr_to_range,
150#endif 92#endif
151}; 93};
diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/ipv4/netfilter/nf_nat_proto_udp.c
index 4b8f49910ff2..9e61c79492e4 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udp.c
@@ -8,7 +8,6 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/random.h>
12#include <linux/ip.h> 11#include <linux/ip.h>
13#include <linux/udp.h> 12#include <linux/udp.h>
14 13
@@ -18,74 +17,19 @@
18#include <net/netfilter/nf_nat_rule.h> 17#include <net/netfilter/nf_nat_rule.h>
19#include <net/netfilter/nf_nat_protocol.h> 18#include <net/netfilter/nf_nat_protocol.h>
20 19
21static int 20static u_int16_t udp_port_rover;
22udp_in_range(const struct nf_conntrack_tuple *tuple,
23 enum nf_nat_manip_type maniptype,
24 const union nf_conntrack_man_proto *min,
25 const union nf_conntrack_man_proto *max)
26{
27 __be16 port;
28
29 if (maniptype == IP_NAT_MANIP_SRC)
30 port = tuple->src.u.udp.port;
31 else
32 port = tuple->dst.u.udp.port;
33
34 return ntohs(port) >= ntohs(min->udp.port) &&
35 ntohs(port) <= ntohs(max->udp.port);
36}
37 21
38static int 22static bool
39udp_unique_tuple(struct nf_conntrack_tuple *tuple, 23udp_unique_tuple(struct nf_conntrack_tuple *tuple,
40 const struct nf_nat_range *range, 24 const struct nf_nat_range *range,
41 enum nf_nat_manip_type maniptype, 25 enum nf_nat_manip_type maniptype,
42 const struct nf_conn *ct) 26 const struct nf_conn *ct)
43{ 27{
44 static u_int16_t port; 28 return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
45 __be16 *portptr; 29 &udp_port_rover);
46 unsigned int range_size, min, i;
47
48 if (maniptype == IP_NAT_MANIP_SRC)
49 portptr = &tuple->src.u.udp.port;
50 else
51 portptr = &tuple->dst.u.udp.port;
52
53 /* If no range specified... */
54 if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
55 /* If it's dst rewrite, can't change port */
56 if (maniptype == IP_NAT_MANIP_DST)
57 return 0;
58
59 if (ntohs(*portptr) < 1024) {
60 /* Loose convention: >> 512 is credential passing */
61 if (ntohs(*portptr)<512) {
62 min = 1;
63 range_size = 511 - min + 1;
64 } else {
65 min = 600;
66 range_size = 1023 - min + 1;
67 }
68 } else {
69 min = 1024;
70 range_size = 65535 - 1024 + 1;
71 }
72 } else {
73 min = ntohs(range->min.udp.port);
74 range_size = ntohs(range->max.udp.port) - min + 1;
75 }
76
77 if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
78 port = net_random();
79
80 for (i = 0; i < range_size; i++, port++) {
81 *portptr = htons(min + port % range_size);
82 if (!nf_nat_used_tuple(tuple, ct))
83 return 1;
84 }
85 return 0;
86} 30}
87 31
88static int 32static bool
89udp_manip_pkt(struct sk_buff *skb, 33udp_manip_pkt(struct sk_buff *skb,
90 unsigned int iphdroff, 34 unsigned int iphdroff,
91 const struct nf_conntrack_tuple *tuple, 35 const struct nf_conntrack_tuple *tuple,
@@ -98,7 +42,7 @@ udp_manip_pkt(struct sk_buff *skb,
98 __be16 *portptr, newport; 42 __be16 *portptr, newport;
99 43
100 if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) 44 if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
101 return 0; 45 return false;
102 46
103 iph = (struct iphdr *)(skb->data + iphdroff); 47 iph = (struct iphdr *)(skb->data + iphdroff);
104 hdr = (struct udphdr *)(skb->data + hdroff); 48 hdr = (struct udphdr *)(skb->data + hdroff);
@@ -124,18 +68,17 @@ udp_manip_pkt(struct sk_buff *skb,
124 hdr->check = CSUM_MANGLED_0; 68 hdr->check = CSUM_MANGLED_0;
125 } 69 }
126 *portptr = newport; 70 *portptr = newport;
127 return 1; 71 return true;
128} 72}
129 73
130const struct nf_nat_protocol nf_nat_protocol_udp = { 74const struct nf_nat_protocol nf_nat_protocol_udp = {
131 .name = "UDP",
132 .protonum = IPPROTO_UDP, 75 .protonum = IPPROTO_UDP,
133 .me = THIS_MODULE, 76 .me = THIS_MODULE,
134 .manip_pkt = udp_manip_pkt, 77 .manip_pkt = udp_manip_pkt,
135 .in_range = udp_in_range, 78 .in_range = nf_nat_proto_in_range,
136 .unique_tuple = udp_unique_tuple, 79 .unique_tuple = udp_unique_tuple,
137#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 80#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
138 .range_to_nlattr = nf_nat_port_range_to_nlattr, 81 .range_to_nlattr = nf_nat_proto_range_to_nlattr,
139 .nlattr_to_range = nf_nat_port_nlattr_to_range, 82 .nlattr_to_range = nf_nat_proto_nlattr_to_range,
140#endif 83#endif
141}; 84};
diff --git a/net/ipv4/netfilter/nf_nat_proto_udplite.c b/net/ipv4/netfilter/nf_nat_proto_udplite.c
new file mode 100644
index 000000000000..440a229bbd87
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_proto_udplite.c
@@ -0,0 +1,99 @@
1/* (C) 1999-2001 Paul `Rusty' Russell
2 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
3 * (C) 2008 Patrick McHardy <kaber@trash.net>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/types.h>
11#include <linux/init.h>
12#include <linux/ip.h>
13#include <linux/udp.h>
14
15#include <linux/netfilter.h>
16#include <net/netfilter/nf_nat.h>
17#include <net/netfilter/nf_nat_protocol.h>
18
19static u_int16_t udplite_port_rover;
20
21static bool
22udplite_unique_tuple(struct nf_conntrack_tuple *tuple,
23 const struct nf_nat_range *range,
24 enum nf_nat_manip_type maniptype,
25 const struct nf_conn *ct)
26{
27 return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
28 &udplite_port_rover);
29}
30
31static bool
32udplite_manip_pkt(struct sk_buff *skb,
33 unsigned int iphdroff,
34 const struct nf_conntrack_tuple *tuple,
35 enum nf_nat_manip_type maniptype)
36{
37 const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
38 struct udphdr *hdr;
39 unsigned int hdroff = iphdroff + iph->ihl*4;
40 __be32 oldip, newip;
41 __be16 *portptr, newport;
42
43 if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
44 return false;
45
46 iph = (struct iphdr *)(skb->data + iphdroff);
47 hdr = (struct udphdr *)(skb->data + hdroff);
48
49 if (maniptype == IP_NAT_MANIP_SRC) {
50 /* Get rid of src ip and src pt */
51 oldip = iph->saddr;
52 newip = tuple->src.u3.ip;
53 newport = tuple->src.u.udp.port;
54 portptr = &hdr->source;
55 } else {
56 /* Get rid of dst ip and dst pt */
57 oldip = iph->daddr;
58 newip = tuple->dst.u3.ip;
59 newport = tuple->dst.u.udp.port;
60 portptr = &hdr->dest;
61 }
62
63 inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1);
64 inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, 0);
65 if (!hdr->check)
66 hdr->check = CSUM_MANGLED_0;
67
68 *portptr = newport;
69 return true;
70}
71
72static const struct nf_nat_protocol nf_nat_protocol_udplite = {
73 .protonum = IPPROTO_UDPLITE,
74 .me = THIS_MODULE,
75 .manip_pkt = udplite_manip_pkt,
76 .in_range = nf_nat_proto_in_range,
77 .unique_tuple = udplite_unique_tuple,
78#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
79 .range_to_nlattr = nf_nat_proto_range_to_nlattr,
80 .nlattr_to_range = nf_nat_proto_nlattr_to_range,
81#endif
82};
83
84static int __init nf_nat_proto_udplite_init(void)
85{
86 return nf_nat_protocol_register(&nf_nat_protocol_udplite);
87}
88
89static void __exit nf_nat_proto_udplite_fini(void)
90{
91 nf_nat_protocol_unregister(&nf_nat_protocol_udplite);
92}
93
94module_init(nf_nat_proto_udplite_init);
95module_exit(nf_nat_proto_udplite_fini);
96
97MODULE_LICENSE("GPL");
98MODULE_DESCRIPTION("UDP-Lite NAT protocol helper");
99MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
diff --git a/net/ipv4/netfilter/nf_nat_proto_unknown.c b/net/ipv4/netfilter/nf_nat_proto_unknown.c
index a26efeb073cb..14381c62acea 100644
--- a/net/ipv4/netfilter/nf_nat_proto_unknown.c
+++ b/net/ipv4/netfilter/nf_nat_proto_unknown.c
@@ -18,35 +18,34 @@
18#include <net/netfilter/nf_nat_rule.h> 18#include <net/netfilter/nf_nat_rule.h>
19#include <net/netfilter/nf_nat_protocol.h> 19#include <net/netfilter/nf_nat_protocol.h>
20 20
21static int unknown_in_range(const struct nf_conntrack_tuple *tuple, 21static bool unknown_in_range(const struct nf_conntrack_tuple *tuple,
22 enum nf_nat_manip_type manip_type, 22 enum nf_nat_manip_type manip_type,
23 const union nf_conntrack_man_proto *min, 23 const union nf_conntrack_man_proto *min,
24 const union nf_conntrack_man_proto *max) 24 const union nf_conntrack_man_proto *max)
25{ 25{
26 return 1; 26 return true;
27} 27}
28 28
29static int unknown_unique_tuple(struct nf_conntrack_tuple *tuple, 29static bool unknown_unique_tuple(struct nf_conntrack_tuple *tuple,
30 const struct nf_nat_range *range, 30 const struct nf_nat_range *range,
31 enum nf_nat_manip_type maniptype, 31 enum nf_nat_manip_type maniptype,
32 const struct nf_conn *ct) 32 const struct nf_conn *ct)
33{ 33{
34 /* Sorry: we can't help you; if it's not unique, we can't frob 34 /* Sorry: we can't help you; if it's not unique, we can't frob
35 anything. */ 35 anything. */
36 return 0; 36 return false;
37} 37}
38 38
39static int 39static bool
40unknown_manip_pkt(struct sk_buff *skb, 40unknown_manip_pkt(struct sk_buff *skb,
41 unsigned int iphdroff, 41 unsigned int iphdroff,
42 const struct nf_conntrack_tuple *tuple, 42 const struct nf_conntrack_tuple *tuple,
43 enum nf_nat_manip_type maniptype) 43 enum nf_nat_manip_type maniptype)
44{ 44{
45 return 1; 45 return true;
46} 46}
47 47
48const struct nf_nat_protocol nf_nat_unknown_protocol = { 48const struct nf_nat_protocol nf_nat_unknown_protocol = {
49 .name = "unknown",
50 /* .me isn't set: getting a ref to this cannot fail. */ 49 /* .me isn't set: getting a ref to this cannot fail. */
51 .manip_pkt = unknown_manip_pkt, 50 .manip_pkt = unknown_manip_pkt,
52 .in_range = unknown_in_range, 51 .in_range = unknown_in_range,
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index f8fda57ba20b..e8b4d0d4439e 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -61,7 +61,7 @@ static struct
61static struct xt_table __nat_table = { 61static struct xt_table __nat_table = {
62 .name = "nat", 62 .name = "nat",
63 .valid_hooks = NAT_VALID_HOOKS, 63 .valid_hooks = NAT_VALID_HOOKS,
64 .lock = RW_LOCK_UNLOCKED, 64 .lock = __RW_LOCK_UNLOCKED(__nat_table.lock),
65 .me = THIS_MODULE, 65 .me = THIS_MODULE,
66 .af = AF_INET, 66 .af = AF_INET,
67}; 67};
@@ -143,7 +143,7 @@ static bool ipt_snat_checkentry(const char *tablename,
143 void *targinfo, 143 void *targinfo,
144 unsigned int hook_mask) 144 unsigned int hook_mask)
145{ 145{
146 struct nf_nat_multi_range_compat *mr = targinfo; 146 const struct nf_nat_multi_range_compat *mr = targinfo;
147 147
148 /* Must be a valid range */ 148 /* Must be a valid range */
149 if (mr->rangesize != 1) { 149 if (mr->rangesize != 1) {
@@ -159,7 +159,7 @@ static bool ipt_dnat_checkentry(const char *tablename,
159 void *targinfo, 159 void *targinfo,
160 unsigned int hook_mask) 160 unsigned int hook_mask)
161{ 161{
162 struct nf_nat_multi_range_compat *mr = targinfo; 162 const struct nf_nat_multi_range_compat *mr = targinfo;
163 163
164 /* Must be a valid range */ 164 /* Must be a valid range */
165 if (mr->rangesize != 1) { 165 if (mr->rangesize != 1) {
@@ -188,25 +188,6 @@ alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
188 return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum)); 188 return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
189} 189}
190 190
191unsigned int
192alloc_null_binding_confirmed(struct nf_conn *ct, unsigned int hooknum)
193{
194 __be32 ip
195 = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
196 ? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip
197 : ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip);
198 __be16 all
199 = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
200 ? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.all
201 : ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u.all);
202 struct nf_nat_range range
203 = { IP_NAT_RANGE_MAP_IPS, ip, ip, { all }, { all } };
204
205 pr_debug("Allocating NULL binding for confirmed %p (%u.%u.%u.%u)\n",
206 ct, NIPQUAD(ip));
207 return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
208}
209
210int nf_nat_rule_find(struct sk_buff *skb, 191int nf_nat_rule_find(struct sk_buff *skb,
211 unsigned int hooknum, 192 unsigned int hooknum,
212 const struct net_device *in, 193 const struct net_device *in,
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index b4c8d4968bb2..4334d5cabc5b 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -2,6 +2,8 @@
2 * 2 *
3 * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar> 3 * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar>
4 * based on RR's ip_nat_ftp.c and other modules. 4 * based on RR's ip_nat_ftp.c and other modules.
5 * (C) 2007 United Security Providers
6 * (C) 2007, 2008 Patrick McHardy <kaber@trash.net>
5 * 7 *
6 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -26,275 +28,461 @@ MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>");
26MODULE_DESCRIPTION("SIP NAT helper"); 28MODULE_DESCRIPTION("SIP NAT helper");
27MODULE_ALIAS("ip_nat_sip"); 29MODULE_ALIAS("ip_nat_sip");
28 30
29struct addr_map {
30 struct {
31 char src[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
32 char dst[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
33 unsigned int srclen, srciplen;
34 unsigned int dstlen, dstiplen;
35 } addr[IP_CT_DIR_MAX];
36};
37 31
38static void addr_map_init(const struct nf_conn *ct, struct addr_map *map) 32static unsigned int mangle_packet(struct sk_buff *skb,
33 const char **dptr, unsigned int *datalen,
34 unsigned int matchoff, unsigned int matchlen,
35 const char *buffer, unsigned int buflen)
39{ 36{
40 const struct nf_conntrack_tuple *t; 37 enum ip_conntrack_info ctinfo;
41 enum ip_conntrack_dir dir; 38 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
42 unsigned int n; 39
43 40 if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, matchoff, matchlen,
44 for (dir = 0; dir < IP_CT_DIR_MAX; dir++) { 41 buffer, buflen))
45 t = &ct->tuplehash[dir].tuple; 42 return 0;
46 43
47 n = sprintf(map->addr[dir].src, "%u.%u.%u.%u", 44 /* Reload data pointer and adjust datalen value */
48 NIPQUAD(t->src.u3.ip)); 45 *dptr = skb->data + ip_hdrlen(skb) + sizeof(struct udphdr);
49 map->addr[dir].srciplen = n; 46 *datalen += buflen - matchlen;
50 n += sprintf(map->addr[dir].src + n, ":%u", 47 return 1;
51 ntohs(t->src.u.udp.port));
52 map->addr[dir].srclen = n;
53
54 n = sprintf(map->addr[dir].dst, "%u.%u.%u.%u",
55 NIPQUAD(t->dst.u3.ip));
56 map->addr[dir].dstiplen = n;
57 n += sprintf(map->addr[dir].dst + n, ":%u",
58 ntohs(t->dst.u.udp.port));
59 map->addr[dir].dstlen = n;
60 }
61} 48}
62 49
63static int map_sip_addr(struct sk_buff *skb, enum ip_conntrack_info ctinfo, 50static int map_addr(struct sk_buff *skb,
64 struct nf_conn *ct, const char **dptr, size_t dlen, 51 const char **dptr, unsigned int *datalen,
65 enum sip_header_pos pos, struct addr_map *map) 52 unsigned int matchoff, unsigned int matchlen,
53 union nf_inet_addr *addr, __be16 port)
66{ 54{
55 enum ip_conntrack_info ctinfo;
56 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
67 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 57 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
68 unsigned int matchlen, matchoff, addrlen; 58 char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
69 char *addr; 59 unsigned int buflen;
70 60 __be32 newaddr;
71 if (ct_sip_get_info(ct, *dptr, dlen, &matchoff, &matchlen, pos) <= 0) 61 __be16 newport;
62
63 if (ct->tuplehash[dir].tuple.src.u3.ip == addr->ip &&
64 ct->tuplehash[dir].tuple.src.u.udp.port == port) {
65 newaddr = ct->tuplehash[!dir].tuple.dst.u3.ip;
66 newport = ct->tuplehash[!dir].tuple.dst.u.udp.port;
67 } else if (ct->tuplehash[dir].tuple.dst.u3.ip == addr->ip &&
68 ct->tuplehash[dir].tuple.dst.u.udp.port == port) {
69 newaddr = ct->tuplehash[!dir].tuple.src.u3.ip;
70 newport = ct->tuplehash[!dir].tuple.src.u.udp.port;
71 } else
72 return 1; 72 return 1;
73 73
74 if ((matchlen == map->addr[dir].srciplen || 74 if (newaddr == addr->ip && newport == port)
75 matchlen == map->addr[dir].srclen) &&
76 memcmp(*dptr + matchoff, map->addr[dir].src, matchlen) == 0) {
77 addr = map->addr[!dir].dst;
78 addrlen = map->addr[!dir].dstlen;
79 } else if ((matchlen == map->addr[dir].dstiplen ||
80 matchlen == map->addr[dir].dstlen) &&
81 memcmp(*dptr + matchoff, map->addr[dir].dst, matchlen) == 0) {
82 addr = map->addr[!dir].src;
83 addrlen = map->addr[!dir].srclen;
84 } else
85 return 1; 75 return 1;
86 76
87 if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, 77 buflen = sprintf(buffer, "%u.%u.%u.%u:%u",
88 matchoff, matchlen, addr, addrlen)) 78 NIPQUAD(newaddr), ntohs(newport));
89 return 0;
90 *dptr = skb->data + ip_hdrlen(skb) + sizeof(struct udphdr);
91 return 1;
92 79
80 return mangle_packet(skb, dptr, datalen, matchoff, matchlen,
81 buffer, buflen);
93} 82}
94 83
95static unsigned int ip_nat_sip(struct sk_buff *skb, 84static int map_sip_addr(struct sk_buff *skb,
96 enum ip_conntrack_info ctinfo, 85 const char **dptr, unsigned int *datalen,
97 struct nf_conn *ct, 86 enum sip_header_types type)
98 const char **dptr)
99{ 87{
100 enum sip_header_pos pos; 88 enum ip_conntrack_info ctinfo;
101 struct addr_map map; 89 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
102 int dataoff, datalen; 90 unsigned int matchlen, matchoff;
91 union nf_inet_addr addr;
92 __be16 port;
103 93
104 dataoff = ip_hdrlen(skb) + sizeof(struct udphdr); 94 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, type, NULL,
105 datalen = skb->len - dataoff; 95 &matchoff, &matchlen, &addr, &port) <= 0)
106 if (datalen < sizeof("SIP/2.0") - 1) 96 return 1;
107 return NF_ACCEPT; 97 return map_addr(skb, dptr, datalen, matchoff, matchlen, &addr, port);
98}
108 99
109 addr_map_init(ct, &map); 100static unsigned int ip_nat_sip(struct sk_buff *skb,
101 const char **dptr, unsigned int *datalen)
102{
103 enum ip_conntrack_info ctinfo;
104 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
105 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
106 unsigned int dataoff, matchoff, matchlen;
107 union nf_inet_addr addr;
108 __be16 port;
109 int request, in_header;
110 110
111 /* Basic rules: requests and responses. */ 111 /* Basic rules: requests and responses. */
112 if (strncmp(*dptr, "SIP/2.0", sizeof("SIP/2.0") - 1) != 0) { 112 if (strnicmp(*dptr, "SIP/2.0", strlen("SIP/2.0")) != 0) {
113 /* 10.2: Constructing the REGISTER Request: 113 if (ct_sip_parse_request(ct, *dptr, *datalen,
114 * 114 &matchoff, &matchlen,
115 * The "userinfo" and "@" components of the SIP URI MUST NOT 115 &addr, &port) > 0 &&
116 * be present. 116 !map_addr(skb, dptr, datalen, matchoff, matchlen,
117 */ 117 &addr, port))
118 if (datalen >= sizeof("REGISTER") - 1 && 118 return NF_DROP;
119 strncmp(*dptr, "REGISTER", sizeof("REGISTER") - 1) == 0) 119 request = 1;
120 pos = POS_REG_REQ_URI; 120 } else
121 else 121 request = 0;
122 pos = POS_REQ_URI; 122
123 123 /* Translate topmost Via header and parameters */
124 if (!map_sip_addr(skb, ctinfo, ct, dptr, datalen, pos, &map)) 124 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
125 SIP_HDR_VIA, NULL, &matchoff, &matchlen,
126 &addr, &port) > 0) {
127 unsigned int matchend, poff, plen, buflen, n;
128 char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
129
130 /* We're only interested in headers related to this
131 * connection */
132 if (request) {
133 if (addr.ip != ct->tuplehash[dir].tuple.src.u3.ip ||
134 port != ct->tuplehash[dir].tuple.src.u.udp.port)
135 goto next;
136 } else {
137 if (addr.ip != ct->tuplehash[dir].tuple.dst.u3.ip ||
138 port != ct->tuplehash[dir].tuple.dst.u.udp.port)
139 goto next;
140 }
141
142 if (!map_addr(skb, dptr, datalen, matchoff, matchlen,
143 &addr, port))
125 return NF_DROP; 144 return NF_DROP;
145
146 matchend = matchoff + matchlen;
147
148 /* The maddr= parameter (RFC 2361) specifies where to send
149 * the reply. */
150 if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
151 "maddr=", &poff, &plen,
152 &addr) > 0 &&
153 addr.ip == ct->tuplehash[dir].tuple.src.u3.ip &&
154 addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) {
155 __be32 ip = ct->tuplehash[!dir].tuple.dst.u3.ip;
156 buflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(ip));
157 if (!mangle_packet(skb, dptr, datalen, poff, plen,
158 buffer, buflen))
159 return NF_DROP;
160 }
161
162 /* The received= parameter (RFC 2361) contains the address
163 * from which the server received the request. */
164 if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
165 "received=", &poff, &plen,
166 &addr) > 0 &&
167 addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip &&
168 addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) {
169 __be32 ip = ct->tuplehash[!dir].tuple.src.u3.ip;
170 buflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(ip));
171 if (!mangle_packet(skb, dptr, datalen, poff, plen,
172 buffer, buflen))
173 return NF_DROP;
174 }
175
176 /* The rport= parameter (RFC 3581) contains the port number
177 * from which the server received the request. */
178 if (ct_sip_parse_numerical_param(ct, *dptr, matchend, *datalen,
179 "rport=", &poff, &plen,
180 &n) > 0 &&
181 htons(n) == ct->tuplehash[dir].tuple.dst.u.udp.port &&
182 htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) {
183 __be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port;
184 buflen = sprintf(buffer, "%u", ntohs(p));
185 if (!mangle_packet(skb, dptr, datalen, poff, plen,
186 buffer, buflen))
187 return NF_DROP;
188 }
126 } 189 }
127 190
128 if (!map_sip_addr(skb, ctinfo, ct, dptr, datalen, POS_FROM, &map) || 191next:
129 !map_sip_addr(skb, ctinfo, ct, dptr, datalen, POS_TO, &map) || 192 /* Translate Contact headers */
130 !map_sip_addr(skb, ctinfo, ct, dptr, datalen, POS_VIA, &map) || 193 dataoff = 0;
131 !map_sip_addr(skb, ctinfo, ct, dptr, datalen, POS_CONTACT, &map)) 194 in_header = 0;
195 while (ct_sip_parse_header_uri(ct, *dptr, &dataoff, *datalen,
196 SIP_HDR_CONTACT, &in_header,
197 &matchoff, &matchlen,
198 &addr, &port) > 0) {
199 if (!map_addr(skb, dptr, datalen, matchoff, matchlen,
200 &addr, port))
201 return NF_DROP;
202 }
203
204 if (!map_sip_addr(skb, dptr, datalen, SIP_HDR_FROM) ||
205 !map_sip_addr(skb, dptr, datalen, SIP_HDR_TO))
132 return NF_DROP; 206 return NF_DROP;
133 return NF_ACCEPT; 207 return NF_ACCEPT;
134} 208}
135 209
136static unsigned int mangle_sip_packet(struct sk_buff *skb, 210/* Handles expected signalling connections and media streams */
137 enum ip_conntrack_info ctinfo, 211static void ip_nat_sip_expected(struct nf_conn *ct,
138 struct nf_conn *ct, 212 struct nf_conntrack_expect *exp)
139 const char **dptr, size_t dlen,
140 char *buffer, int bufflen,
141 enum sip_header_pos pos)
142{ 213{
143 unsigned int matchlen, matchoff; 214 struct nf_nat_range range;
144 215
145 if (ct_sip_get_info(ct, *dptr, dlen, &matchoff, &matchlen, pos) <= 0) 216 /* This must be a fresh one. */
146 return 0; 217 BUG_ON(ct->status & IPS_NAT_DONE_MASK);
147 218
148 if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, 219 /* For DST manip, map port here to where it's expected. */
149 matchoff, matchlen, buffer, bufflen)) 220 range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
150 return 0; 221 range.min = range.max = exp->saved_proto;
222 range.min_ip = range.max_ip = exp->saved_ip;
223 nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
151 224
152 /* We need to reload this. Thanks Patrick. */ 225 /* Change src to where master sends to, but only if the connection
153 *dptr = skb->data + ip_hdrlen(skb) + sizeof(struct udphdr); 226 * actually came from the same source. */
154 return 1; 227 if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip ==
228 ct->master->tuplehash[exp->dir].tuple.src.u3.ip) {
229 range.flags = IP_NAT_RANGE_MAP_IPS;
230 range.min_ip = range.max_ip
231 = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
232 nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
233 }
155} 234}
156 235
157static int mangle_content_len(struct sk_buff *skb, 236static unsigned int ip_nat_sip_expect(struct sk_buff *skb,
158 enum ip_conntrack_info ctinfo, 237 const char **dptr, unsigned int *datalen,
159 struct nf_conn *ct, 238 struct nf_conntrack_expect *exp,
160 const char *dptr) 239 unsigned int matchoff,
240 unsigned int matchlen)
161{ 241{
162 unsigned int dataoff, matchoff, matchlen; 242 enum ip_conntrack_info ctinfo;
163 char buffer[sizeof("65536")]; 243 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
164 int bufflen; 244 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
245 __be32 newip;
246 u_int16_t port;
247 char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
248 unsigned buflen;
165 249
166 dataoff = ip_hdrlen(skb) + sizeof(struct udphdr); 250 /* Connection will come from reply */
251 if (ct->tuplehash[dir].tuple.src.u3.ip == ct->tuplehash[!dir].tuple.dst.u3.ip)
252 newip = exp->tuple.dst.u3.ip;
253 else
254 newip = ct->tuplehash[!dir].tuple.dst.u3.ip;
167 255
168 /* Get actual SDP length */ 256 /* If the signalling port matches the connection's source port in the
169 if (ct_sip_get_info(ct, dptr, skb->len - dataoff, &matchoff, 257 * original direction, try to use the destination port in the opposite
170 &matchlen, POS_SDP_HEADER) > 0) { 258 * direction. */
259 if (exp->tuple.dst.u.udp.port ==
260 ct->tuplehash[dir].tuple.src.u.udp.port)
261 port = ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port);
262 else
263 port = ntohs(exp->tuple.dst.u.udp.port);
264
265 exp->saved_ip = exp->tuple.dst.u3.ip;
266 exp->tuple.dst.u3.ip = newip;
267 exp->saved_proto.udp.port = exp->tuple.dst.u.udp.port;
268 exp->dir = !dir;
269 exp->expectfn = ip_nat_sip_expected;
171 270
172 /* since ct_sip_get_info() give us a pointer passing 'v=' 271 for (; port != 0; port++) {
173 we need to add 2 bytes in this count. */ 272 exp->tuple.dst.u.udp.port = htons(port);
174 int c_len = skb->len - dataoff - matchoff + 2; 273 if (nf_ct_expect_related(exp) == 0)
274 break;
275 }
175 276
176 /* Now, update SDP length */ 277 if (port == 0)
177 if (ct_sip_get_info(ct, dptr, skb->len - dataoff, &matchoff, 278 return NF_DROP;
178 &matchlen, POS_CONTENT) > 0) {
179 279
180 bufflen = sprintf(buffer, "%u", c_len); 280 if (exp->tuple.dst.u3.ip != exp->saved_ip ||
181 return nf_nat_mangle_udp_packet(skb, ct, ctinfo, 281 exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) {
182 matchoff, matchlen, 282 buflen = sprintf(buffer, "%u.%u.%u.%u:%u",
183 buffer, bufflen); 283 NIPQUAD(newip), port);
184 } 284 if (!mangle_packet(skb, dptr, datalen, matchoff, matchlen,
285 buffer, buflen))
286 goto err;
185 } 287 }
186 return 0; 288 return NF_ACCEPT;
289
290err:
291 nf_ct_unexpect_related(exp);
292 return NF_DROP;
187} 293}
188 294
189static unsigned int mangle_sdp(struct sk_buff *skb, 295static int mangle_content_len(struct sk_buff *skb,
190 enum ip_conntrack_info ctinfo, 296 const char **dptr, unsigned int *datalen)
191 struct nf_conn *ct,
192 __be32 newip, u_int16_t port,
193 const char *dptr)
194{ 297{
195 char buffer[sizeof("nnn.nnn.nnn.nnn")]; 298 enum ip_conntrack_info ctinfo;
196 unsigned int dataoff, bufflen; 299 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
300 unsigned int matchoff, matchlen;
301 char buffer[sizeof("65536")];
302 int buflen, c_len;
197 303
198 dataoff = ip_hdrlen(skb) + sizeof(struct udphdr); 304 /* Get actual SDP length */
305 if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen,
306 SDP_HDR_VERSION, SDP_HDR_UNSPEC,
307 &matchoff, &matchlen) <= 0)
308 return 0;
309 c_len = *datalen - matchoff + strlen("v=");
199 310
200 /* Mangle owner and contact info. */ 311 /* Now, update SDP length */
201 bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip)); 312 if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CONTENT_LENGTH,
202 if (!mangle_sip_packet(skb, ctinfo, ct, &dptr, skb->len - dataoff, 313 &matchoff, &matchlen) <= 0)
203 buffer, bufflen, POS_OWNER_IP4))
204 return 0; 314 return 0;
205 315
206 if (!mangle_sip_packet(skb, ctinfo, ct, &dptr, skb->len - dataoff, 316 buflen = sprintf(buffer, "%u", c_len);
207 buffer, bufflen, POS_CONNECTION_IP4)) 317 return mangle_packet(skb, dptr, datalen, matchoff, matchlen,
318 buffer, buflen);
319}
320
321static unsigned mangle_sdp_packet(struct sk_buff *skb, const char **dptr,
322 unsigned int dataoff, unsigned int *datalen,
323 enum sdp_header_types type,
324 enum sdp_header_types term,
325 char *buffer, int buflen)
326{
327 enum ip_conntrack_info ctinfo;
328 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
329 unsigned int matchlen, matchoff;
330
331 if (ct_sip_get_sdp_header(ct, *dptr, dataoff, *datalen, type, term,
332 &matchoff, &matchlen) <= 0)
208 return 0; 333 return 0;
334 return mangle_packet(skb, dptr, datalen, matchoff, matchlen,
335 buffer, buflen);
336}
209 337
210 /* Mangle media port. */ 338static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr,
211 bufflen = sprintf(buffer, "%u", port); 339 unsigned int dataoff,
212 if (!mangle_sip_packet(skb, ctinfo, ct, &dptr, skb->len - dataoff, 340 unsigned int *datalen,
213 buffer, bufflen, POS_MEDIA)) 341 enum sdp_header_types type,
342 enum sdp_header_types term,
343 const union nf_inet_addr *addr)
344{
345 char buffer[sizeof("nnn.nnn.nnn.nnn")];
346 unsigned int buflen;
347
348 buflen = sprintf(buffer, NIPQUAD_FMT, NIPQUAD(addr->ip));
349 if (!mangle_sdp_packet(skb, dptr, dataoff, datalen, type, term,
350 buffer, buflen))
214 return 0; 351 return 0;
215 352
216 return mangle_content_len(skb, ctinfo, ct, dptr); 353 return mangle_content_len(skb, dptr, datalen);
217} 354}
218 355
219static void ip_nat_sdp_expect(struct nf_conn *ct, 356static unsigned int ip_nat_sdp_port(struct sk_buff *skb,
220 struct nf_conntrack_expect *exp) 357 const char **dptr,
358 unsigned int *datalen,
359 unsigned int matchoff,
360 unsigned int matchlen,
361 u_int16_t port)
221{ 362{
222 struct nf_nat_range range; 363 char buffer[sizeof("nnnnn")];
364 unsigned int buflen;
223 365
224 /* This must be a fresh one. */ 366 buflen = sprintf(buffer, "%u", port);
225 BUG_ON(ct->status & IPS_NAT_DONE_MASK); 367 if (!mangle_packet(skb, dptr, datalen, matchoff, matchlen,
368 buffer, buflen))
369 return 0;
226 370
227 /* Change src to where master sends to */ 371 return mangle_content_len(skb, dptr, datalen);
228 range.flags = IP_NAT_RANGE_MAP_IPS; 372}
229 range.min_ip = range.max_ip
230 = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
231 nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
232 373
233 /* For DST manip, map port here to where it's expected. */ 374static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr,
234 range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED); 375 unsigned int dataoff,
235 range.min = range.max = exp->saved_proto; 376 unsigned int *datalen,
236 range.min_ip = range.max_ip = exp->saved_ip; 377 const union nf_inet_addr *addr)
237 nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST); 378{
379 char buffer[sizeof("nnn.nnn.nnn.nnn")];
380 unsigned int buflen;
381
382 /* Mangle session description owner and contact addresses */
383 buflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(addr->ip));
384 if (!mangle_sdp_packet(skb, dptr, dataoff, datalen,
385 SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA,
386 buffer, buflen))
387 return 0;
388
389 if (!mangle_sdp_packet(skb, dptr, dataoff, datalen,
390 SDP_HDR_CONNECTION_IP4, SDP_HDR_MEDIA,
391 buffer, buflen))
392 return 0;
393
394 return mangle_content_len(skb, dptr, datalen);
238} 395}
239 396
240/* So, this packet has hit the connection tracking matching code. 397/* So, this packet has hit the connection tracking matching code.
241 Mangle it, and change the expectation to match the new version. */ 398 Mangle it, and change the expectation to match the new version. */
242static unsigned int ip_nat_sdp(struct sk_buff *skb, 399static unsigned int ip_nat_sdp_media(struct sk_buff *skb,
243 enum ip_conntrack_info ctinfo, 400 const char **dptr,
244 struct nf_conntrack_expect *exp, 401 unsigned int *datalen,
245 const char *dptr) 402 struct nf_conntrack_expect *rtp_exp,
403 struct nf_conntrack_expect *rtcp_exp,
404 unsigned int mediaoff,
405 unsigned int medialen,
406 union nf_inet_addr *rtp_addr)
246{ 407{
247 struct nf_conn *ct = exp->master; 408 enum ip_conntrack_info ctinfo;
409 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
248 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 410 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
249 __be32 newip;
250 u_int16_t port; 411 u_int16_t port;
251 412
252 /* Connection will come from reply */ 413 /* Connection will come from reply */
253 if (ct->tuplehash[dir].tuple.src.u3.ip == 414 if (ct->tuplehash[dir].tuple.src.u3.ip ==
254 ct->tuplehash[!dir].tuple.dst.u3.ip) 415 ct->tuplehash[!dir].tuple.dst.u3.ip)
255 newip = exp->tuple.dst.u3.ip; 416 rtp_addr->ip = rtp_exp->tuple.dst.u3.ip;
256 else 417 else
257 newip = ct->tuplehash[!dir].tuple.dst.u3.ip; 418 rtp_addr->ip = ct->tuplehash[!dir].tuple.dst.u3.ip;
258 419
259 exp->saved_ip = exp->tuple.dst.u3.ip; 420 rtp_exp->saved_ip = rtp_exp->tuple.dst.u3.ip;
260 exp->tuple.dst.u3.ip = newip; 421 rtp_exp->tuple.dst.u3.ip = rtp_addr->ip;
261 exp->saved_proto.udp.port = exp->tuple.dst.u.udp.port; 422 rtp_exp->saved_proto.udp.port = rtp_exp->tuple.dst.u.udp.port;
262 exp->dir = !dir; 423 rtp_exp->dir = !dir;
263 424 rtp_exp->expectfn = ip_nat_sip_expected;
264 /* When you see the packet, we need to NAT it the same as the 425
265 this one. */ 426 rtcp_exp->saved_ip = rtcp_exp->tuple.dst.u3.ip;
266 exp->expectfn = ip_nat_sdp_expect; 427 rtcp_exp->tuple.dst.u3.ip = rtp_addr->ip;
267 428 rtcp_exp->saved_proto.udp.port = rtcp_exp->tuple.dst.u.udp.port;
268 /* Try to get same port: if not, try to change it. */ 429 rtcp_exp->dir = !dir;
269 for (port = ntohs(exp->saved_proto.udp.port); port != 0; port++) { 430 rtcp_exp->expectfn = ip_nat_sip_expected;
270 exp->tuple.dst.u.udp.port = htons(port); 431
271 if (nf_ct_expect_related(exp) == 0) 432 /* Try to get same pair of ports: if not, try to change them. */
433 for (port = ntohs(rtp_exp->tuple.dst.u.udp.port);
434 port != 0; port += 2) {
435 rtp_exp->tuple.dst.u.udp.port = htons(port);
436 if (nf_ct_expect_related(rtp_exp) != 0)
437 continue;
438 rtcp_exp->tuple.dst.u.udp.port = htons(port + 1);
439 if (nf_ct_expect_related(rtcp_exp) == 0)
272 break; 440 break;
441 nf_ct_unexpect_related(rtp_exp);
273 } 442 }
274 443
275 if (port == 0) 444 if (port == 0)
276 return NF_DROP; 445 goto err1;
446
447 /* Update media port. */
448 if (rtp_exp->tuple.dst.u.udp.port != rtp_exp->saved_proto.udp.port &&
449 !ip_nat_sdp_port(skb, dptr, datalen, mediaoff, medialen, port))
450 goto err2;
277 451
278 if (!mangle_sdp(skb, ctinfo, ct, newip, port, dptr)) {
279 nf_ct_unexpect_related(exp);
280 return NF_DROP;
281 }
282 return NF_ACCEPT; 452 return NF_ACCEPT;
453
454err2:
455 nf_ct_unexpect_related(rtp_exp);
456 nf_ct_unexpect_related(rtcp_exp);
457err1:
458 return NF_DROP;
283} 459}
284 460
285static void __exit nf_nat_sip_fini(void) 461static void __exit nf_nat_sip_fini(void)
286{ 462{
287 rcu_assign_pointer(nf_nat_sip_hook, NULL); 463 rcu_assign_pointer(nf_nat_sip_hook, NULL);
288 rcu_assign_pointer(nf_nat_sdp_hook, NULL); 464 rcu_assign_pointer(nf_nat_sip_expect_hook, NULL);
465 rcu_assign_pointer(nf_nat_sdp_addr_hook, NULL);
466 rcu_assign_pointer(nf_nat_sdp_port_hook, NULL);
467 rcu_assign_pointer(nf_nat_sdp_session_hook, NULL);
468 rcu_assign_pointer(nf_nat_sdp_media_hook, NULL);
289 synchronize_rcu(); 469 synchronize_rcu();
290} 470}
291 471
292static int __init nf_nat_sip_init(void) 472static int __init nf_nat_sip_init(void)
293{ 473{
294 BUG_ON(nf_nat_sip_hook != NULL); 474 BUG_ON(nf_nat_sip_hook != NULL);
295 BUG_ON(nf_nat_sdp_hook != NULL); 475 BUG_ON(nf_nat_sip_expect_hook != NULL);
476 BUG_ON(nf_nat_sdp_addr_hook != NULL);
477 BUG_ON(nf_nat_sdp_port_hook != NULL);
478 BUG_ON(nf_nat_sdp_session_hook != NULL);
479 BUG_ON(nf_nat_sdp_media_hook != NULL);
296 rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip); 480 rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip);
297 rcu_assign_pointer(nf_nat_sdp_hook, ip_nat_sdp); 481 rcu_assign_pointer(nf_nat_sip_expect_hook, ip_nat_sip_expect);
482 rcu_assign_pointer(nf_nat_sdp_addr_hook, ip_nat_sdp_addr);
483 rcu_assign_pointer(nf_nat_sdp_port_hook, ip_nat_sdp_port);
484 rcu_assign_pointer(nf_nat_sdp_session_hook, ip_nat_sdp_session);
485 rcu_assign_pointer(nf_nat_sdp_media_hook, ip_nat_sdp_media);
298 return 0; 486 return 0;
299} 487}
300 488
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 540ce6ae887c..5daefad3d193 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -50,6 +50,7 @@
50#include <net/udp.h> 50#include <net/udp.h>
51 51
52#include <net/netfilter/nf_nat.h> 52#include <net/netfilter/nf_nat.h>
53#include <net/netfilter/nf_conntrack_expect.h>
53#include <net/netfilter/nf_conntrack_helper.h> 54#include <net/netfilter/nf_conntrack_helper.h>
54#include <net/netfilter/nf_nat_helper.h> 55#include <net/netfilter/nf_nat_helper.h>
55 56
@@ -219,7 +220,7 @@ static unsigned char asn1_length_decode(struct asn1_ctx *ctx,
219 if (ch < 0x80) 220 if (ch < 0x80)
220 *len = ch; 221 *len = ch;
221 else { 222 else {
222 cnt = (unsigned char) (ch & 0x7F); 223 cnt = ch & 0x7F;
223 *len = 0; 224 *len = 0;
224 225
225 while (cnt > 0) { 226 while (cnt > 0) {
@@ -617,8 +618,7 @@ struct snmp_cnv
617 int syntax; 618 int syntax;
618}; 619};
619 620
620static struct snmp_cnv snmp_conv [] = 621static const struct snmp_cnv snmp_conv[] = {
621{
622 {ASN1_UNI, ASN1_NUL, SNMP_NULL}, 622 {ASN1_UNI, ASN1_NUL, SNMP_NULL},
623 {ASN1_UNI, ASN1_INT, SNMP_INTEGER}, 623 {ASN1_UNI, ASN1_INT, SNMP_INTEGER},
624 {ASN1_UNI, ASN1_OTS, SNMP_OCTETSTR}, 624 {ASN1_UNI, ASN1_OTS, SNMP_OCTETSTR},
@@ -643,7 +643,7 @@ static unsigned char snmp_tag_cls2syntax(unsigned int tag,
643 unsigned int cls, 643 unsigned int cls,
644 unsigned short *syntax) 644 unsigned short *syntax)
645{ 645{
646 struct snmp_cnv *cnv; 646 const struct snmp_cnv *cnv;
647 647
648 cnv = snmp_conv; 648 cnv = snmp_conv;
649 649
@@ -903,7 +903,7 @@ static inline void mangle_address(unsigned char *begin,
903 u_int32_t old; 903 u_int32_t old;
904 904
905 if (debug) 905 if (debug)
906 memcpy(&old, (unsigned char *)addr, sizeof(old)); 906 memcpy(&old, addr, sizeof(old));
907 907
908 *addr = map->to; 908 *addr = map->to;
909 909
@@ -998,7 +998,7 @@ err_id_free:
998 * 998 *
999 *****************************************************************************/ 999 *****************************************************************************/
1000 1000
1001static void hex_dump(unsigned char *buf, size_t len) 1001static void hex_dump(const unsigned char *buf, size_t len)
1002{ 1002{
1003 size_t i; 1003 size_t i;
1004 1004
@@ -1079,7 +1079,7 @@ static int snmp_parse_mangle(unsigned char *msg,
1079 if (cls != ASN1_CTX || con != ASN1_CON) 1079 if (cls != ASN1_CTX || con != ASN1_CON)
1080 return 0; 1080 return 0;
1081 if (debug > 1) { 1081 if (debug > 1) {
1082 unsigned char *pdus[] = { 1082 static const unsigned char *const pdus[] = {
1083 [SNMP_PDU_GET] = "get", 1083 [SNMP_PDU_GET] = "get",
1084 [SNMP_PDU_NEXT] = "get-next", 1084 [SNMP_PDU_NEXT] = "get-next",
1085 [SNMP_PDU_RESPONSE] = "response", 1085 [SNMP_PDU_RESPONSE] = "response",
@@ -1231,8 +1231,8 @@ static int help(struct sk_buff *skb, unsigned int protoff,
1231{ 1231{
1232 int dir = CTINFO2DIR(ctinfo); 1232 int dir = CTINFO2DIR(ctinfo);
1233 unsigned int ret; 1233 unsigned int ret;
1234 struct iphdr *iph = ip_hdr(skb); 1234 const struct iphdr *iph = ip_hdr(skb);
1235 struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl); 1235 const struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);
1236 1236
1237 /* SNMP replies and originating SNMP traps get mangled */ 1237 /* SNMP replies and originating SNMP traps get mangled */
1238 if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY) 1238 if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY)
@@ -1267,11 +1267,15 @@ static int help(struct sk_buff *skb, unsigned int protoff,
1267 return ret; 1267 return ret;
1268} 1268}
1269 1269
1270static const struct nf_conntrack_expect_policy snmp_exp_policy = {
1271 .max_expected = 0,
1272 .timeout = 180,
1273};
1274
1270static struct nf_conntrack_helper snmp_helper __read_mostly = { 1275static struct nf_conntrack_helper snmp_helper __read_mostly = {
1271 .max_expected = 0,
1272 .timeout = 180,
1273 .me = THIS_MODULE, 1276 .me = THIS_MODULE,
1274 .help = help, 1277 .help = help,
1278 .expect_policy = &snmp_exp_policy,
1275 .name = "snmp", 1279 .name = "snmp",
1276 .tuple.src.l3num = AF_INET, 1280 .tuple.src.l3num = AF_INET,
1277 .tuple.src.u.udp.port = __constant_htons(SNMP_PORT), 1281 .tuple.src.u.udp.port = __constant_htons(SNMP_PORT),
@@ -1279,10 +1283,9 @@ static struct nf_conntrack_helper snmp_helper __read_mostly = {
1279}; 1283};
1280 1284
1281static struct nf_conntrack_helper snmp_trap_helper __read_mostly = { 1285static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
1282 .max_expected = 0,
1283 .timeout = 180,
1284 .me = THIS_MODULE, 1286 .me = THIS_MODULE,
1285 .help = help, 1287 .help = help,
1288 .expect_policy = &snmp_exp_policy,
1286 .name = "snmp_trap", 1289 .name = "snmp_trap",
1287 .tuple.src.l3num = AF_INET, 1290 .tuple.src.l3num = AF_INET,
1288 .tuple.src.u.udp.port = __constant_htons(SNMP_TRAP_PORT), 1291 .tuple.src.u.udp.port = __constant_htons(SNMP_TRAP_PORT),
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index 99b2c788d5a8..b7dd695691a0 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -30,8 +30,8 @@
30#ifdef CONFIG_XFRM 30#ifdef CONFIG_XFRM
31static void nat_decode_session(struct sk_buff *skb, struct flowi *fl) 31static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
32{ 32{
33 struct nf_conn *ct; 33 const struct nf_conn *ct;
34 struct nf_conntrack_tuple *t; 34 const struct nf_conntrack_tuple *t;
35 enum ip_conntrack_info ctinfo; 35 enum ip_conntrack_info ctinfo;
36 enum ip_conntrack_dir dir; 36 enum ip_conntrack_dir dir;
37 unsigned long statusbit; 37 unsigned long statusbit;
@@ -50,7 +50,10 @@ static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
50 if (ct->status & statusbit) { 50 if (ct->status & statusbit) {
51 fl->fl4_dst = t->dst.u3.ip; 51 fl->fl4_dst = t->dst.u3.ip;
52 if (t->dst.protonum == IPPROTO_TCP || 52 if (t->dst.protonum == IPPROTO_TCP ||
53 t->dst.protonum == IPPROTO_UDP) 53 t->dst.protonum == IPPROTO_UDP ||
54 t->dst.protonum == IPPROTO_UDPLITE ||
55 t->dst.protonum == IPPROTO_DCCP ||
56 t->dst.protonum == IPPROTO_SCTP)
54 fl->fl_ip_dport = t->dst.u.tcp.port; 57 fl->fl_ip_dport = t->dst.u.tcp.port;
55 } 58 }
56 59
@@ -59,7 +62,10 @@ static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
59 if (ct->status & statusbit) { 62 if (ct->status & statusbit) {
60 fl->fl4_src = t->src.u3.ip; 63 fl->fl4_src = t->src.u3.ip;
61 if (t->dst.protonum == IPPROTO_TCP || 64 if (t->dst.protonum == IPPROTO_TCP ||
62 t->dst.protonum == IPPROTO_UDP) 65 t->dst.protonum == IPPROTO_UDP ||
66 t->dst.protonum == IPPROTO_UDPLITE ||
67 t->dst.protonum == IPPROTO_DCCP ||
68 t->dst.protonum == IPPROTO_SCTP)
63 fl->fl_ip_sport = t->src.u.tcp.port; 69 fl->fl_ip_sport = t->src.u.tcp.port;
64 } 70 }
65} 71}
@@ -87,21 +93,8 @@ nf_nat_fn(unsigned int hooknum,
87 have dropped it. Hence it's the user's responsibilty to 93 have dropped it. Hence it's the user's responsibilty to
88 packet filter it out, or implement conntrack/NAT for that 94 packet filter it out, or implement conntrack/NAT for that
89 protocol. 8) --RR */ 95 protocol. 8) --RR */
90 if (!ct) { 96 if (!ct)
91 /* Exception: ICMP redirect to new connection (not in
92 hash table yet). We must not let this through, in
93 case we're doing NAT to the same network. */
94 if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
95 struct icmphdr _hdr, *hp;
96
97 hp = skb_header_pointer(skb, ip_hdrlen(skb),
98 sizeof(_hdr), &_hdr);
99 if (hp != NULL &&
100 hp->type == ICMP_REDIRECT)
101 return NF_DROP;
102 }
103 return NF_ACCEPT; 97 return NF_ACCEPT;
104 }
105 98
106 /* Don't try to NAT if this packet is not conntracked */ 99 /* Don't try to NAT if this packet is not conntracked */
107 if (ct == &nf_conntrack_untracked) 100 if (ct == &nf_conntrack_untracked)
@@ -109,6 +102,9 @@ nf_nat_fn(unsigned int hooknum,
109 102
110 nat = nfct_nat(ct); 103 nat = nfct_nat(ct);
111 if (!nat) { 104 if (!nat) {
105 /* NAT module was loaded late. */
106 if (nf_ct_is_confirmed(ct))
107 return NF_ACCEPT;
112 nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC); 108 nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
113 if (nat == NULL) { 109 if (nat == NULL) {
114 pr_debug("failed to add NAT extension\n"); 110 pr_debug("failed to add NAT extension\n");
@@ -134,10 +130,7 @@ nf_nat_fn(unsigned int hooknum,
134 if (!nf_nat_initialized(ct, maniptype)) { 130 if (!nf_nat_initialized(ct, maniptype)) {
135 unsigned int ret; 131 unsigned int ret;
136 132
137 if (unlikely(nf_ct_is_confirmed(ct))) 133 if (hooknum == NF_INET_LOCAL_IN)
138 /* NAT module was loaded late */
139 ret = alloc_null_binding_confirmed(ct, hooknum);
140 else if (hooknum == NF_INET_LOCAL_IN)
141 /* LOCAL_IN hook doesn't have a chain! */ 134 /* LOCAL_IN hook doesn't have a chain! */
142 ret = alloc_null_binding(ct, hooknum); 135 ret = alloc_null_binding(ct, hooknum);
143 else 136 else
@@ -189,7 +182,7 @@ nf_nat_out(unsigned int hooknum,
189 int (*okfn)(struct sk_buff *)) 182 int (*okfn)(struct sk_buff *))
190{ 183{
191#ifdef CONFIG_XFRM 184#ifdef CONFIG_XFRM
192 struct nf_conn *ct; 185 const struct nf_conn *ct;
193 enum ip_conntrack_info ctinfo; 186 enum ip_conntrack_info ctinfo;
194#endif 187#endif
195 unsigned int ret; 188 unsigned int ret;
@@ -223,7 +216,7 @@ nf_nat_local_fn(unsigned int hooknum,
223 const struct net_device *out, 216 const struct net_device *out,
224 int (*okfn)(struct sk_buff *)) 217 int (*okfn)(struct sk_buff *))
225{ 218{
226 struct nf_conn *ct; 219 const struct nf_conn *ct;
227 enum ip_conntrack_info ctinfo; 220 enum ip_conntrack_info ctinfo;
228 unsigned int ret; 221 unsigned int ret;
229 222
@@ -252,25 +245,6 @@ nf_nat_local_fn(unsigned int hooknum,
252 return ret; 245 return ret;
253} 246}
254 247
255static unsigned int
256nf_nat_adjust(unsigned int hooknum,
257 struct sk_buff *skb,
258 const struct net_device *in,
259 const struct net_device *out,
260 int (*okfn)(struct sk_buff *))
261{
262 struct nf_conn *ct;
263 enum ip_conntrack_info ctinfo;
264
265 ct = nf_ct_get(skb, &ctinfo);
266 if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
267 pr_debug("nf_nat_standalone: adjusting sequence number\n");
268 if (!nf_nat_seq_adjust(skb, ct, ctinfo))
269 return NF_DROP;
270 }
271 return NF_ACCEPT;
272}
273
274/* We must be after connection tracking and before packet filtering. */ 248/* We must be after connection tracking and before packet filtering. */
275 249
276static struct nf_hook_ops nf_nat_ops[] __read_mostly = { 250static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
@@ -290,14 +264,6 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
290 .hooknum = NF_INET_POST_ROUTING, 264 .hooknum = NF_INET_POST_ROUTING,
291 .priority = NF_IP_PRI_NAT_SRC, 265 .priority = NF_IP_PRI_NAT_SRC,
292 }, 266 },
293 /* After conntrack, adjust sequence number */
294 {
295 .hook = nf_nat_adjust,
296 .owner = THIS_MODULE,
297 .pf = PF_INET,
298 .hooknum = NF_INET_POST_ROUTING,
299 .priority = NF_IP_PRI_NAT_SEQ_ADJUST,
300 },
301 /* Before packet filtering, change destination */ 267 /* Before packet filtering, change destination */
302 { 268 {
303 .hook = nf_nat_local_fn, 269 .hook = nf_nat_local_fn,
@@ -314,14 +280,6 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
314 .hooknum = NF_INET_LOCAL_IN, 280 .hooknum = NF_INET_LOCAL_IN,
315 .priority = NF_IP_PRI_NAT_SRC, 281 .priority = NF_IP_PRI_NAT_SRC,
316 }, 282 },
317 /* After conntrack, adjust sequence number */
318 {
319 .hook = nf_nat_adjust,
320 .owner = THIS_MODULE,
321 .pf = PF_INET,
322 .hooknum = NF_INET_LOCAL_IN,
323 .priority = NF_IP_PRI_NAT_SEQ_ADJUST,
324 },
325}; 283};
326 284
327static int __init nf_nat_standalone_init(void) 285static int __init nf_nat_standalone_init(void)
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index d63474c6b400..552169b41b16 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -51,24 +51,54 @@
51 */ 51 */
52static int sockstat_seq_show(struct seq_file *seq, void *v) 52static int sockstat_seq_show(struct seq_file *seq, void *v)
53{ 53{
54 struct net *net = seq->private;
55
54 socket_seq_show(seq); 56 socket_seq_show(seq);
55 seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n", 57 seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n",
56 sock_prot_inuse_get(&tcp_prot), 58 sock_prot_inuse_get(net, &tcp_prot),
57 atomic_read(&tcp_orphan_count), 59 atomic_read(&tcp_orphan_count),
58 tcp_death_row.tw_count, atomic_read(&tcp_sockets_allocated), 60 tcp_death_row.tw_count, atomic_read(&tcp_sockets_allocated),
59 atomic_read(&tcp_memory_allocated)); 61 atomic_read(&tcp_memory_allocated));
60 seq_printf(seq, "UDP: inuse %d mem %d\n", sock_prot_inuse_get(&udp_prot), 62 seq_printf(seq, "UDP: inuse %d mem %d\n",
63 sock_prot_inuse_get(net, &udp_prot),
61 atomic_read(&udp_memory_allocated)); 64 atomic_read(&udp_memory_allocated));
62 seq_printf(seq, "UDPLITE: inuse %d\n", sock_prot_inuse_get(&udplite_prot)); 65 seq_printf(seq, "UDPLITE: inuse %d\n",
63 seq_printf(seq, "RAW: inuse %d\n", sock_prot_inuse_get(&raw_prot)); 66 sock_prot_inuse_get(net, &udplite_prot));
67 seq_printf(seq, "RAW: inuse %d\n",
68 sock_prot_inuse_get(net, &raw_prot));
64 seq_printf(seq, "FRAG: inuse %d memory %d\n", 69 seq_printf(seq, "FRAG: inuse %d memory %d\n",
65 ip_frag_nqueues(&init_net), ip_frag_mem(&init_net)); 70 ip_frag_nqueues(net), ip_frag_mem(net));
66 return 0; 71 return 0;
67} 72}
68 73
69static int sockstat_seq_open(struct inode *inode, struct file *file) 74static int sockstat_seq_open(struct inode *inode, struct file *file)
70{ 75{
71 return single_open(file, sockstat_seq_show, NULL); 76 int err;
77 struct net *net;
78
79 err = -ENXIO;
80 net = get_proc_net(inode);
81 if (net == NULL)
82 goto err_net;
83
84 err = single_open(file, sockstat_seq_show, net);
85 if (err < 0)
86 goto err_open;
87
88 return 0;
89
90err_open:
91 put_net(net);
92err_net:
93 return err;
94}
95
96static int sockstat_seq_release(struct inode *inode, struct file *file)
97{
98 struct net *net = ((struct seq_file *)file->private_data)->private;
99
100 put_net(net);
101 return single_release(inode, file);
72} 102}
73 103
74static const struct file_operations sockstat_seq_fops = { 104static const struct file_operations sockstat_seq_fops = {
@@ -76,7 +106,7 @@ static const struct file_operations sockstat_seq_fops = {
76 .open = sockstat_seq_open, 106 .open = sockstat_seq_open,
77 .read = seq_read, 107 .read = seq_read,
78 .llseek = seq_lseek, 108 .llseek = seq_lseek,
79 .release = single_release, 109 .release = sockstat_seq_release,
80}; 110};
81 111
82/* snmp items */ 112/* snmp items */
@@ -423,25 +453,42 @@ static const struct file_operations netstat_seq_fops = {
423 .release = single_release, 453 .release = single_release,
424}; 454};
425 455
456static __net_init int ip_proc_init_net(struct net *net)
457{
458 if (!proc_net_fops_create(net, "sockstat", S_IRUGO, &sockstat_seq_fops))
459 return -ENOMEM;
460 return 0;
461}
462
463static __net_exit void ip_proc_exit_net(struct net *net)
464{
465 proc_net_remove(net, "sockstat");
466}
467
468static __net_initdata struct pernet_operations ip_proc_ops = {
469 .init = ip_proc_init_net,
470 .exit = ip_proc_exit_net,
471};
472
426int __init ip_misc_proc_init(void) 473int __init ip_misc_proc_init(void)
427{ 474{
428 int rc = 0; 475 int rc = 0;
429 476
477 if (register_pernet_subsys(&ip_proc_ops))
478 goto out_pernet;
479
430 if (!proc_net_fops_create(&init_net, "netstat", S_IRUGO, &netstat_seq_fops)) 480 if (!proc_net_fops_create(&init_net, "netstat", S_IRUGO, &netstat_seq_fops))
431 goto out_netstat; 481 goto out_netstat;
432 482
433 if (!proc_net_fops_create(&init_net, "snmp", S_IRUGO, &snmp_seq_fops)) 483 if (!proc_net_fops_create(&init_net, "snmp", S_IRUGO, &snmp_seq_fops))
434 goto out_snmp; 484 goto out_snmp;
435
436 if (!proc_net_fops_create(&init_net, "sockstat", S_IRUGO, &sockstat_seq_fops))
437 goto out_sockstat;
438out: 485out:
439 return rc; 486 return rc;
440out_sockstat:
441 proc_net_remove(&init_net, "snmp");
442out_snmp: 487out_snmp:
443 proc_net_remove(&init_net, "netstat"); 488 proc_net_remove(&init_net, "netstat");
444out_netstat: 489out_netstat:
490 unregister_pernet_subsys(&ip_proc_ops);
491out_pernet:
445 rc = -ENOMEM; 492 rc = -ENOMEM;
446 goto out; 493 goto out;
447} 494}
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index a3002fe65b7f..11d7f753a820 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -81,41 +81,34 @@
81#include <linux/netfilter_ipv4.h> 81#include <linux/netfilter_ipv4.h>
82 82
83static struct raw_hashinfo raw_v4_hashinfo = { 83static struct raw_hashinfo raw_v4_hashinfo = {
84 .lock = __RW_LOCK_UNLOCKED(), 84 .lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock),
85}; 85};
86 86
87void raw_hash_sk(struct sock *sk, struct raw_hashinfo *h) 87void raw_hash_sk(struct sock *sk)
88{ 88{
89 struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
89 struct hlist_head *head; 90 struct hlist_head *head;
90 91
91 head = &h->ht[inet_sk(sk)->num & (RAW_HTABLE_SIZE - 1)]; 92 head = &h->ht[inet_sk(sk)->num & (RAW_HTABLE_SIZE - 1)];
92 93
93 write_lock_bh(&h->lock); 94 write_lock_bh(&h->lock);
94 sk_add_node(sk, head); 95 sk_add_node(sk, head);
95 sock_prot_inuse_add(sk->sk_prot, 1); 96 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
96 write_unlock_bh(&h->lock); 97 write_unlock_bh(&h->lock);
97} 98}
98EXPORT_SYMBOL_GPL(raw_hash_sk); 99EXPORT_SYMBOL_GPL(raw_hash_sk);
99 100
100void raw_unhash_sk(struct sock *sk, struct raw_hashinfo *h) 101void raw_unhash_sk(struct sock *sk)
101{ 102{
103 struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
104
102 write_lock_bh(&h->lock); 105 write_lock_bh(&h->lock);
103 if (sk_del_node_init(sk)) 106 if (sk_del_node_init(sk))
104 sock_prot_inuse_add(sk->sk_prot, -1); 107 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
105 write_unlock_bh(&h->lock); 108 write_unlock_bh(&h->lock);
106} 109}
107EXPORT_SYMBOL_GPL(raw_unhash_sk); 110EXPORT_SYMBOL_GPL(raw_unhash_sk);
108 111
109static void raw_v4_hash(struct sock *sk)
110{
111 raw_hash_sk(sk, &raw_v4_hashinfo);
112}
113
114static void raw_v4_unhash(struct sock *sk)
115{
116 raw_unhash_sk(sk, &raw_v4_hashinfo);
117}
118
119static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk, 112static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
120 unsigned short num, __be32 raddr, __be32 laddr, int dif) 113 unsigned short num, __be32 raddr, __be32 laddr, int dif)
121{ 114{
@@ -124,7 +117,7 @@ static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
124 sk_for_each_from(sk, node) { 117 sk_for_each_from(sk, node) {
125 struct inet_sock *inet = inet_sk(sk); 118 struct inet_sock *inet = inet_sk(sk);
126 119
127 if (sk->sk_net == net && inet->num == num && 120 if (net_eq(sock_net(sk), net) && inet->num == num &&
128 !(inet->daddr && inet->daddr != raddr) && 121 !(inet->daddr && inet->daddr != raddr) &&
129 !(inet->rcv_saddr && inet->rcv_saddr != laddr) && 122 !(inet->rcv_saddr && inet->rcv_saddr != laddr) &&
130 !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) 123 !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
@@ -175,7 +168,7 @@ static int raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash)
175 if (hlist_empty(head)) 168 if (hlist_empty(head))
176 goto out; 169 goto out;
177 170
178 net = skb->dev->nd_net; 171 net = dev_net(skb->dev);
179 sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol, 172 sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol,
180 iph->saddr, iph->daddr, 173 iph->saddr, iph->daddr,
181 skb->dev->ifindex); 174 skb->dev->ifindex);
@@ -283,7 +276,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
283 raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]); 276 raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
284 if (raw_sk != NULL) { 277 if (raw_sk != NULL) {
285 iph = (struct iphdr *)skb->data; 278 iph = (struct iphdr *)skb->data;
286 net = skb->dev->nd_net; 279 net = dev_net(skb->dev);
287 280
288 while ((raw_sk = __raw_v4_lookup(net, raw_sk, protocol, 281 while ((raw_sk = __raw_v4_lookup(net, raw_sk, protocol,
289 iph->daddr, iph->saddr, 282 iph->daddr, iph->saddr,
@@ -506,7 +499,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
506 ipc.oif = sk->sk_bound_dev_if; 499 ipc.oif = sk->sk_bound_dev_if;
507 500
508 if (msg->msg_controllen) { 501 if (msg->msg_controllen) {
509 err = ip_cmsg_send(msg, &ipc); 502 err = ip_cmsg_send(sock_net(sk), msg, &ipc);
510 if (err) 503 if (err)
511 goto out; 504 goto out;
512 if (ipc.opt) 505 if (ipc.opt)
@@ -560,7 +553,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
560 } 553 }
561 554
562 security_sk_classify_flow(sk, &fl); 555 security_sk_classify_flow(sk, &fl);
563 err = ip_route_output_flow(&init_net, &rt, &fl, sk, 1); 556 err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 1);
564 } 557 }
565 if (err) 558 if (err)
566 goto done; 559 goto done;
@@ -627,7 +620,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
627 620
628 if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in)) 621 if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
629 goto out; 622 goto out;
630 chk_addr_ret = inet_addr_type(sk->sk_net, addr->sin_addr.s_addr); 623 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
631 ret = -EADDRNOTAVAIL; 624 ret = -EADDRNOTAVAIL;
632 if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL && 625 if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
633 chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) 626 chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
@@ -825,8 +818,6 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
825 } 818 }
826} 819}
827 820
828DEFINE_PROTO_INUSE(raw)
829
830struct proto raw_prot = { 821struct proto raw_prot = {
831 .name = "RAW", 822 .name = "RAW",
832 .owner = THIS_MODULE, 823 .owner = THIS_MODULE,
@@ -841,14 +832,14 @@ struct proto raw_prot = {
841 .recvmsg = raw_recvmsg, 832 .recvmsg = raw_recvmsg,
842 .bind = raw_bind, 833 .bind = raw_bind,
843 .backlog_rcv = raw_rcv_skb, 834 .backlog_rcv = raw_rcv_skb,
844 .hash = raw_v4_hash, 835 .hash = raw_hash_sk,
845 .unhash = raw_v4_unhash, 836 .unhash = raw_unhash_sk,
846 .obj_size = sizeof(struct raw_sock), 837 .obj_size = sizeof(struct raw_sock),
838 .h.raw_hash = &raw_v4_hashinfo,
847#ifdef CONFIG_COMPAT 839#ifdef CONFIG_COMPAT
848 .compat_setsockopt = compat_raw_setsockopt, 840 .compat_setsockopt = compat_raw_setsockopt,
849 .compat_getsockopt = compat_raw_getsockopt, 841 .compat_getsockopt = compat_raw_getsockopt,
850#endif 842#endif
851 REF_PROTO_INUSE(raw)
852}; 843};
853 844
854#ifdef CONFIG_PROC_FS 845#ifdef CONFIG_PROC_FS
@@ -862,7 +853,7 @@ static struct sock *raw_get_first(struct seq_file *seq)
862 struct hlist_node *node; 853 struct hlist_node *node;
863 854
864 sk_for_each(sk, node, &state->h->ht[state->bucket]) 855 sk_for_each(sk, node, &state->h->ht[state->bucket])
865 if (sk->sk_net == state->p.net) 856 if (sock_net(sk) == seq_file_net(seq))
866 goto found; 857 goto found;
867 } 858 }
868 sk = NULL; 859 sk = NULL;
@@ -878,7 +869,7 @@ static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk)
878 sk = sk_next(sk); 869 sk = sk_next(sk);
879try_again: 870try_again:
880 ; 871 ;
881 } while (sk && sk->sk_net != state->p.net); 872 } while (sk && sock_net(sk) != seq_file_net(seq));
882 873
883 if (!sk && ++state->bucket < RAW_HTABLE_SIZE) { 874 if (!sk && ++state->bucket < RAW_HTABLE_SIZE) {
884 sk = sk_head(&state->h->ht[state->bucket]); 875 sk = sk_head(&state->h->ht[state->bucket]);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 7b5e8e1d94be..780e9484c825 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -118,21 +118,19 @@
118#define RT_GC_TIMEOUT (300*HZ) 118#define RT_GC_TIMEOUT (300*HZ)
119 119
120static int ip_rt_max_size; 120static int ip_rt_max_size;
121static int ip_rt_gc_timeout = RT_GC_TIMEOUT; 121static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
122static int ip_rt_gc_interval = 60 * HZ; 122static int ip_rt_gc_interval __read_mostly = 60 * HZ;
123static int ip_rt_gc_min_interval = HZ / 2; 123static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
124static int ip_rt_redirect_number = 9; 124static int ip_rt_redirect_number __read_mostly = 9;
125static int ip_rt_redirect_load = HZ / 50; 125static int ip_rt_redirect_load __read_mostly = HZ / 50;
126static int ip_rt_redirect_silence = ((HZ / 50) << (9 + 1)); 126static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
127static int ip_rt_error_cost = HZ; 127static int ip_rt_error_cost __read_mostly = HZ;
128static int ip_rt_error_burst = 5 * HZ; 128static int ip_rt_error_burst __read_mostly = 5 * HZ;
129static int ip_rt_gc_elasticity = 8; 129static int ip_rt_gc_elasticity __read_mostly = 8;
130static int ip_rt_mtu_expires = 10 * 60 * HZ; 130static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
131static int ip_rt_min_pmtu = 512 + 20 + 20; 131static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
132static int ip_rt_min_advmss = 256; 132static int ip_rt_min_advmss __read_mostly = 256;
133static int ip_rt_secret_interval = 10 * 60 * HZ; 133static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ;
134
135#define RTprint(a...) printk(KERN_DEBUG a)
136 134
137static void rt_worker_func(struct work_struct *work); 135static void rt_worker_func(struct work_struct *work);
138static DECLARE_DELAYED_WORK(expires_work, rt_worker_func); 136static DECLARE_DELAYED_WORK(expires_work, rt_worker_func);
@@ -252,40 +250,41 @@ static inline void rt_hash_lock_init(void)
252} 250}
253#endif 251#endif
254 252
255static struct rt_hash_bucket *rt_hash_table; 253static struct rt_hash_bucket *rt_hash_table __read_mostly;
256static unsigned rt_hash_mask; 254static unsigned rt_hash_mask __read_mostly;
257static unsigned int rt_hash_log; 255static unsigned int rt_hash_log __read_mostly;
258static atomic_t rt_genid; 256static atomic_t rt_genid __read_mostly;
259 257
260static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); 258static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
261#define RT_CACHE_STAT_INC(field) \ 259#define RT_CACHE_STAT_INC(field) \
262 (__raw_get_cpu_var(rt_cache_stat).field++) 260 (__raw_get_cpu_var(rt_cache_stat).field++)
263 261
264static unsigned int rt_hash_code(u32 daddr, u32 saddr) 262static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx)
265{ 263{
266 return jhash_2words(daddr, saddr, atomic_read(&rt_genid)) 264 return jhash_3words((__force u32)(__be32)(daddr),
265 (__force u32)(__be32)(saddr),
266 idx, atomic_read(&rt_genid))
267 & rt_hash_mask; 267 & rt_hash_mask;
268} 268}
269 269
270#define rt_hash(daddr, saddr, idx) \
271 rt_hash_code((__force u32)(__be32)(daddr),\
272 (__force u32)(__be32)(saddr) ^ ((idx) << 5))
273
274#ifdef CONFIG_PROC_FS 270#ifdef CONFIG_PROC_FS
275struct rt_cache_iter_state { 271struct rt_cache_iter_state {
272 struct seq_net_private p;
276 int bucket; 273 int bucket;
277 int genid; 274 int genid;
278}; 275};
279 276
280static struct rtable *rt_cache_get_first(struct rt_cache_iter_state *st) 277static struct rtable *rt_cache_get_first(struct seq_file *seq)
281{ 278{
279 struct rt_cache_iter_state *st = seq->private;
282 struct rtable *r = NULL; 280 struct rtable *r = NULL;
283 281
284 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) { 282 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
285 rcu_read_lock_bh(); 283 rcu_read_lock_bh();
286 r = rcu_dereference(rt_hash_table[st->bucket].chain); 284 r = rcu_dereference(rt_hash_table[st->bucket].chain);
287 while (r) { 285 while (r) {
288 if (r->rt_genid == st->genid) 286 if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
287 r->rt_genid == st->genid)
289 return r; 288 return r;
290 r = rcu_dereference(r->u.dst.rt_next); 289 r = rcu_dereference(r->u.dst.rt_next);
291 } 290 }
@@ -294,8 +293,10 @@ static struct rtable *rt_cache_get_first(struct rt_cache_iter_state *st)
294 return r; 293 return r;
295} 294}
296 295
297static struct rtable *rt_cache_get_next(struct rt_cache_iter_state *st, struct rtable *r) 296static struct rtable *__rt_cache_get_next(struct seq_file *seq,
297 struct rtable *r)
298{ 298{
299 struct rt_cache_iter_state *st = seq->private;
299 r = r->u.dst.rt_next; 300 r = r->u.dst.rt_next;
300 while (!r) { 301 while (!r) {
301 rcu_read_unlock_bh(); 302 rcu_read_unlock_bh();
@@ -307,25 +308,34 @@ static struct rtable *rt_cache_get_next(struct rt_cache_iter_state *st, struct r
307 return rcu_dereference(r); 308 return rcu_dereference(r);
308} 309}
309 310
310static struct rtable *rt_cache_get_idx(struct rt_cache_iter_state *st, loff_t pos) 311static struct rtable *rt_cache_get_next(struct seq_file *seq,
312 struct rtable *r)
313{
314 struct rt_cache_iter_state *st = seq->private;
315 while ((r = __rt_cache_get_next(seq, r)) != NULL) {
316 if (dev_net(r->u.dst.dev) != seq_file_net(seq))
317 continue;
318 if (r->rt_genid == st->genid)
319 break;
320 }
321 return r;
322}
323
324static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
311{ 325{
312 struct rtable *r = rt_cache_get_first(st); 326 struct rtable *r = rt_cache_get_first(seq);
313 327
314 if (r) 328 if (r)
315 while (pos && (r = rt_cache_get_next(st, r))) { 329 while (pos && (r = rt_cache_get_next(seq, r)))
316 if (r->rt_genid != st->genid)
317 continue;
318 --pos; 330 --pos;
319 }
320 return pos ? NULL : r; 331 return pos ? NULL : r;
321} 332}
322 333
323static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos) 334static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
324{ 335{
325 struct rt_cache_iter_state *st = seq->private; 336 struct rt_cache_iter_state *st = seq->private;
326
327 if (*pos) 337 if (*pos)
328 return rt_cache_get_idx(st, *pos - 1); 338 return rt_cache_get_idx(seq, *pos - 1);
329 st->genid = atomic_read(&rt_genid); 339 st->genid = atomic_read(&rt_genid);
330 return SEQ_START_TOKEN; 340 return SEQ_START_TOKEN;
331} 341}
@@ -333,12 +343,11 @@ static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
333static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos) 343static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
334{ 344{
335 struct rtable *r; 345 struct rtable *r;
336 struct rt_cache_iter_state *st = seq->private;
337 346
338 if (v == SEQ_START_TOKEN) 347 if (v == SEQ_START_TOKEN)
339 r = rt_cache_get_first(st); 348 r = rt_cache_get_first(seq);
340 else 349 else
341 r = rt_cache_get_next(st, v); 350 r = rt_cache_get_next(seq, v);
342 ++*pos; 351 ++*pos;
343 return r; 352 return r;
344} 353}
@@ -390,7 +399,7 @@ static const struct seq_operations rt_cache_seq_ops = {
390 399
391static int rt_cache_seq_open(struct inode *inode, struct file *file) 400static int rt_cache_seq_open(struct inode *inode, struct file *file)
392{ 401{
393 return seq_open_private(file, &rt_cache_seq_ops, 402 return seq_open_net(inode, file, &rt_cache_seq_ops,
394 sizeof(struct rt_cache_iter_state)); 403 sizeof(struct rt_cache_iter_state));
395} 404}
396 405
@@ -399,7 +408,7 @@ static const struct file_operations rt_cache_seq_fops = {
399 .open = rt_cache_seq_open, 408 .open = rt_cache_seq_open,
400 .read = seq_read, 409 .read = seq_read,
401 .llseek = seq_lseek, 410 .llseek = seq_lseek,
402 .release = seq_release_private, 411 .release = seq_release_net,
403}; 412};
404 413
405 414
@@ -533,7 +542,7 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
533} 542}
534#endif 543#endif
535 544
536static __init int ip_rt_proc_init(struct net *net) 545static int __net_init ip_rt_do_proc_init(struct net *net)
537{ 546{
538 struct proc_dir_entry *pde; 547 struct proc_dir_entry *pde;
539 548
@@ -564,25 +573,43 @@ err2:
564err1: 573err1:
565 return -ENOMEM; 574 return -ENOMEM;
566} 575}
576
577static void __net_exit ip_rt_do_proc_exit(struct net *net)
578{
579 remove_proc_entry("rt_cache", net->proc_net_stat);
580 remove_proc_entry("rt_cache", net->proc_net);
581 remove_proc_entry("rt_acct", net->proc_net);
582}
583
584static struct pernet_operations ip_rt_proc_ops __net_initdata = {
585 .init = ip_rt_do_proc_init,
586 .exit = ip_rt_do_proc_exit,
587};
588
589static int __init ip_rt_proc_init(void)
590{
591 return register_pernet_subsys(&ip_rt_proc_ops);
592}
593
567#else 594#else
568static inline int ip_rt_proc_init(struct net *net) 595static inline int ip_rt_proc_init(void)
569{ 596{
570 return 0; 597 return 0;
571} 598}
572#endif /* CONFIG_PROC_FS */ 599#endif /* CONFIG_PROC_FS */
573 600
574static __inline__ void rt_free(struct rtable *rt) 601static inline void rt_free(struct rtable *rt)
575{ 602{
576 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free); 603 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
577} 604}
578 605
579static __inline__ void rt_drop(struct rtable *rt) 606static inline void rt_drop(struct rtable *rt)
580{ 607{
581 ip_rt_put(rt); 608 ip_rt_put(rt);
582 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free); 609 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
583} 610}
584 611
585static __inline__ int rt_fast_clean(struct rtable *rth) 612static inline int rt_fast_clean(struct rtable *rth)
586{ 613{
587 /* Kill broadcast/multicast entries very aggresively, if they 614 /* Kill broadcast/multicast entries very aggresively, if they
588 collide in hash table with more useful entries */ 615 collide in hash table with more useful entries */
@@ -590,7 +617,7 @@ static __inline__ int rt_fast_clean(struct rtable *rth)
590 rth->fl.iif && rth->u.dst.rt_next; 617 rth->fl.iif && rth->u.dst.rt_next;
591} 618}
592 619
593static __inline__ int rt_valuable(struct rtable *rth) 620static inline int rt_valuable(struct rtable *rth)
594{ 621{
595 return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) || 622 return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
596 rth->u.dst.expires; 623 rth->u.dst.expires;
@@ -652,7 +679,7 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
652 679
653static inline int compare_netns(struct rtable *rt1, struct rtable *rt2) 680static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
654{ 681{
655 return rt1->u.dst.dev->nd_net == rt2->u.dst.dev->nd_net; 682 return dev_net(rt1->u.dst.dev) == dev_net(rt2->u.dst.dev);
656} 683}
657 684
658/* 685/*
@@ -1032,10 +1059,10 @@ restart:
1032#if RT_CACHE_DEBUG >= 2 1059#if RT_CACHE_DEBUG >= 2
1033 if (rt->u.dst.rt_next) { 1060 if (rt->u.dst.rt_next) {
1034 struct rtable *trt; 1061 struct rtable *trt;
1035 printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash, 1062 printk(KERN_DEBUG "rt_cache @%02x: " NIPQUAD_FMT, hash,
1036 NIPQUAD(rt->rt_dst)); 1063 NIPQUAD(rt->rt_dst));
1037 for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next) 1064 for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
1038 printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst)); 1065 printk(" . " NIPQUAD_FMT, NIPQUAD(trt->rt_dst));
1039 printk("\n"); 1066 printk("\n");
1040 } 1067 }
1041#endif 1068#endif
@@ -1131,10 +1158,12 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1131 __be32 skeys[2] = { saddr, 0 }; 1158 __be32 skeys[2] = { saddr, 0 };
1132 int ikeys[2] = { dev->ifindex, 0 }; 1159 int ikeys[2] = { dev->ifindex, 0 };
1133 struct netevent_redirect netevent; 1160 struct netevent_redirect netevent;
1161 struct net *net;
1134 1162
1135 if (!in_dev) 1163 if (!in_dev)
1136 return; 1164 return;
1137 1165
1166 net = dev_net(dev);
1138 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) 1167 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
1139 || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) 1168 || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw)
1140 || ipv4_is_zeronet(new_gw)) 1169 || ipv4_is_zeronet(new_gw))
@@ -1146,7 +1175,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1146 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev)) 1175 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
1147 goto reject_redirect; 1176 goto reject_redirect;
1148 } else { 1177 } else {
1149 if (inet_addr_type(&init_net, new_gw) != RTN_UNICAST) 1178 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
1150 goto reject_redirect; 1179 goto reject_redirect;
1151 } 1180 }
1152 1181
@@ -1164,7 +1193,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1164 rth->fl.fl4_src != skeys[i] || 1193 rth->fl.fl4_src != skeys[i] ||
1165 rth->fl.oif != ikeys[k] || 1194 rth->fl.oif != ikeys[k] ||
1166 rth->fl.iif != 0 || 1195 rth->fl.iif != 0 ||
1167 rth->rt_genid != atomic_read(&rt_genid)) { 1196 rth->rt_genid != atomic_read(&rt_genid) ||
1197 !net_eq(dev_net(rth->u.dst.dev), net)) {
1168 rthp = &rth->u.dst.rt_next; 1198 rthp = &rth->u.dst.rt_next;
1169 continue; 1199 continue;
1170 } 1200 }
@@ -1245,9 +1275,9 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1245reject_redirect: 1275reject_redirect:
1246#ifdef CONFIG_IP_ROUTE_VERBOSE 1276#ifdef CONFIG_IP_ROUTE_VERBOSE
1247 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) 1277 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
1248 printk(KERN_INFO "Redirect from %u.%u.%u.%u on %s about " 1278 printk(KERN_INFO "Redirect from " NIPQUAD_FMT " on %s about "
1249 "%u.%u.%u.%u ignored.\n" 1279 NIPQUAD_FMT " ignored.\n"
1250 " Advised path = %u.%u.%u.%u -> %u.%u.%u.%u\n", 1280 " Advised path = " NIPQUAD_FMT " -> " NIPQUAD_FMT "\n",
1251 NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw), 1281 NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
1252 NIPQUAD(saddr), NIPQUAD(daddr)); 1282 NIPQUAD(saddr), NIPQUAD(daddr));
1253#endif 1283#endif
@@ -1256,7 +1286,7 @@ reject_redirect:
1256 1286
1257static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) 1287static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1258{ 1288{
1259 struct rtable *rt = (struct rtable*)dst; 1289 struct rtable *rt = (struct rtable *)dst;
1260 struct dst_entry *ret = dst; 1290 struct dst_entry *ret = dst;
1261 1291
1262 if (rt) { 1292 if (rt) {
@@ -1269,7 +1299,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1269 rt->fl.oif); 1299 rt->fl.oif);
1270#if RT_CACHE_DEBUG >= 1 1300#if RT_CACHE_DEBUG >= 1
1271 printk(KERN_DEBUG "ipv4_negative_advice: redirect to " 1301 printk(KERN_DEBUG "ipv4_negative_advice: redirect to "
1272 "%u.%u.%u.%u/%02x dropped\n", 1302 NIPQUAD_FMT "/%02x dropped\n",
1273 NIPQUAD(rt->rt_dst), rt->fl.fl4_tos); 1303 NIPQUAD(rt->rt_dst), rt->fl.fl4_tos);
1274#endif 1304#endif
1275 rt_del(hash, rt); 1305 rt_del(hash, rt);
@@ -1297,7 +1327,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1297 1327
1298void ip_rt_send_redirect(struct sk_buff *skb) 1328void ip_rt_send_redirect(struct sk_buff *skb)
1299{ 1329{
1300 struct rtable *rt = (struct rtable*)skb->dst; 1330 struct rtable *rt = skb->rtable;
1301 struct in_device *in_dev = in_dev_get(rt->u.dst.dev); 1331 struct in_device *in_dev = in_dev_get(rt->u.dst.dev);
1302 1332
1303 if (!in_dev) 1333 if (!in_dev)
@@ -1334,8 +1364,8 @@ void ip_rt_send_redirect(struct sk_buff *skb)
1334 if (IN_DEV_LOG_MARTIANS(in_dev) && 1364 if (IN_DEV_LOG_MARTIANS(in_dev) &&
1335 rt->u.dst.rate_tokens == ip_rt_redirect_number && 1365 rt->u.dst.rate_tokens == ip_rt_redirect_number &&
1336 net_ratelimit()) 1366 net_ratelimit())
1337 printk(KERN_WARNING "host %u.%u.%u.%u/if%d ignores " 1367 printk(KERN_WARNING "host " NIPQUAD_FMT "/if%d ignores "
1338 "redirects for %u.%u.%u.%u to %u.%u.%u.%u.\n", 1368 "redirects for " NIPQUAD_FMT " to " NIPQUAD_FMT ".\n",
1339 NIPQUAD(rt->rt_src), rt->rt_iif, 1369 NIPQUAD(rt->rt_src), rt->rt_iif,
1340 NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway)); 1370 NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway));
1341#endif 1371#endif
@@ -1346,7 +1376,7 @@ out:
1346 1376
1347static int ip_error(struct sk_buff *skb) 1377static int ip_error(struct sk_buff *skb)
1348{ 1378{
1349 struct rtable *rt = (struct rtable*)skb->dst; 1379 struct rtable *rt = skb->rtable;
1350 unsigned long now; 1380 unsigned long now;
1351 int code; 1381 int code;
1352 1382
@@ -1388,7 +1418,7 @@ out: kfree_skb(skb);
1388static const unsigned short mtu_plateau[] = 1418static const unsigned short mtu_plateau[] =
1389{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 }; 1419{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
1390 1420
1391static __inline__ unsigned short guess_mtu(unsigned short old_mtu) 1421static inline unsigned short guess_mtu(unsigned short old_mtu)
1392{ 1422{
1393 int i; 1423 int i;
1394 1424
@@ -1423,7 +1453,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
1423 rth->rt_src == iph->saddr && 1453 rth->rt_src == iph->saddr &&
1424 rth->fl.iif == 0 && 1454 rth->fl.iif == 0 &&
1425 !(dst_metric_locked(&rth->u.dst, RTAX_MTU)) && 1455 !(dst_metric_locked(&rth->u.dst, RTAX_MTU)) &&
1426 rth->u.dst.dev->nd_net == net && 1456 net_eq(dev_net(rth->u.dst.dev), net) &&
1427 rth->rt_genid == atomic_read(&rt_genid)) { 1457 rth->rt_genid == atomic_read(&rt_genid)) {
1428 unsigned short mtu = new_mtu; 1458 unsigned short mtu = new_mtu;
1429 1459
@@ -1499,9 +1529,9 @@ static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
1499{ 1529{
1500 struct rtable *rt = (struct rtable *) dst; 1530 struct rtable *rt = (struct rtable *) dst;
1501 struct in_device *idev = rt->idev; 1531 struct in_device *idev = rt->idev;
1502 if (dev != dev->nd_net->loopback_dev && idev && idev->dev == dev) { 1532 if (dev != dev_net(dev)->loopback_dev && idev && idev->dev == dev) {
1503 struct in_device *loopback_idev = 1533 struct in_device *loopback_idev =
1504 in_dev_get(dev->nd_net->loopback_dev); 1534 in_dev_get(dev_net(dev)->loopback_dev);
1505 if (loopback_idev) { 1535 if (loopback_idev) {
1506 rt->idev = loopback_idev; 1536 rt->idev = loopback_idev;
1507 in_dev_put(idev); 1537 in_dev_put(idev);
@@ -1515,14 +1545,14 @@ static void ipv4_link_failure(struct sk_buff *skb)
1515 1545
1516 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); 1546 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1517 1547
1518 rt = (struct rtable *) skb->dst; 1548 rt = skb->rtable;
1519 if (rt) 1549 if (rt)
1520 dst_set_expires(&rt->u.dst, 0); 1550 dst_set_expires(&rt->u.dst, 0);
1521} 1551}
1522 1552
1523static int ip_rt_bug(struct sk_buff *skb) 1553static int ip_rt_bug(struct sk_buff *skb)
1524{ 1554{
1525 printk(KERN_DEBUG "ip_rt_bug: %u.%u.%u.%u -> %u.%u.%u.%u, %s\n", 1555 printk(KERN_DEBUG "ip_rt_bug: " NIPQUAD_FMT " -> " NIPQUAD_FMT ", %s\n",
1526 NIPQUAD(ip_hdr(skb)->saddr), NIPQUAD(ip_hdr(skb)->daddr), 1556 NIPQUAD(ip_hdr(skb)->saddr), NIPQUAD(ip_hdr(skb)->daddr),
1527 skb->dev ? skb->dev->name : "?"); 1557 skb->dev ? skb->dev->name : "?");
1528 kfree_skb(skb); 1558 kfree_skb(skb);
@@ -1545,7 +1575,7 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
1545 1575
1546 if (rt->fl.iif == 0) 1576 if (rt->fl.iif == 0)
1547 src = rt->rt_src; 1577 src = rt->rt_src;
1548 else if (fib_lookup(rt->u.dst.dev->nd_net, &rt->fl, &res) == 0) { 1578 else if (fib_lookup(dev_net(rt->u.dst.dev), &rt->fl, &res) == 0) {
1549 src = FIB_RES_PREFSRC(res); 1579 src = FIB_RES_PREFSRC(res);
1550 fib_res_put(&res); 1580 fib_res_put(&res);
1551 } else 1581 } else
@@ -1675,7 +1705,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1675 1705
1676 in_dev_put(in_dev); 1706 in_dev_put(in_dev);
1677 hash = rt_hash(daddr, saddr, dev->ifindex); 1707 hash = rt_hash(daddr, saddr, dev->ifindex);
1678 return rt_intern_hash(hash, rth, (struct rtable**) &skb->dst); 1708 return rt_intern_hash(hash, rth, &skb->rtable);
1679 1709
1680e_nobufs: 1710e_nobufs:
1681 in_dev_put(in_dev); 1711 in_dev_put(in_dev);
@@ -1700,8 +1730,8 @@ static void ip_handle_martian_source(struct net_device *dev,
1700 * RFC1812 recommendation, if source is martian, 1730 * RFC1812 recommendation, if source is martian,
1701 * the only hint is MAC header. 1731 * the only hint is MAC header.
1702 */ 1732 */
1703 printk(KERN_WARNING "martian source %u.%u.%u.%u from " 1733 printk(KERN_WARNING "martian source " NIPQUAD_FMT " from "
1704 "%u.%u.%u.%u, on dev %s\n", 1734 NIPQUAD_FMT", on dev %s\n",
1705 NIPQUAD(daddr), NIPQUAD(saddr), dev->name); 1735 NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
1706 if (dev->hard_header_len && skb_mac_header_was_set(skb)) { 1736 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1707 int i; 1737 int i;
@@ -1718,11 +1748,11 @@ static void ip_handle_martian_source(struct net_device *dev,
1718#endif 1748#endif
1719} 1749}
1720 1750
1721static inline int __mkroute_input(struct sk_buff *skb, 1751static int __mkroute_input(struct sk_buff *skb,
1722 struct fib_result* res, 1752 struct fib_result *res,
1723 struct in_device *in_dev, 1753 struct in_device *in_dev,
1724 __be32 daddr, __be32 saddr, u32 tos, 1754 __be32 daddr, __be32 saddr, u32 tos,
1725 struct rtable **result) 1755 struct rtable **result)
1726{ 1756{
1727 1757
1728 struct rtable *rth; 1758 struct rtable *rth;
@@ -1814,11 +1844,11 @@ static inline int __mkroute_input(struct sk_buff *skb,
1814 return err; 1844 return err;
1815} 1845}
1816 1846
1817static inline int ip_mkroute_input(struct sk_buff *skb, 1847static int ip_mkroute_input(struct sk_buff *skb,
1818 struct fib_result* res, 1848 struct fib_result *res,
1819 const struct flowi *fl, 1849 const struct flowi *fl,
1820 struct in_device *in_dev, 1850 struct in_device *in_dev,
1821 __be32 daddr, __be32 saddr, u32 tos) 1851 __be32 daddr, __be32 saddr, u32 tos)
1822{ 1852{
1823 struct rtable* rth = NULL; 1853 struct rtable* rth = NULL;
1824 int err; 1854 int err;
@@ -1836,7 +1866,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb,
1836 1866
1837 /* put it into the cache */ 1867 /* put it into the cache */
1838 hash = rt_hash(daddr, saddr, fl->iif); 1868 hash = rt_hash(daddr, saddr, fl->iif);
1839 return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst); 1869 return rt_intern_hash(hash, rth, &skb->rtable);
1840} 1870}
1841 1871
1842/* 1872/*
@@ -1869,7 +1899,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1869 __be32 spec_dst; 1899 __be32 spec_dst;
1870 int err = -EINVAL; 1900 int err = -EINVAL;
1871 int free_res = 0; 1901 int free_res = 0;
1872 struct net * net = dev->nd_net; 1902 struct net * net = dev_net(dev);
1873 1903
1874 /* IP on this device is disabled. */ 1904 /* IP on this device is disabled. */
1875 1905
@@ -1992,7 +2022,7 @@ local_input:
1992 } 2022 }
1993 rth->rt_type = res.type; 2023 rth->rt_type = res.type;
1994 hash = rt_hash(daddr, saddr, fl.iif); 2024 hash = rt_hash(daddr, saddr, fl.iif);
1995 err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst); 2025 err = rt_intern_hash(hash, rth, &skb->rtable);
1996 goto done; 2026 goto done;
1997 2027
1998no_route: 2028no_route:
@@ -2010,8 +2040,8 @@ martian_destination:
2010 RT_CACHE_STAT_INC(in_martian_dst); 2040 RT_CACHE_STAT_INC(in_martian_dst);
2011#ifdef CONFIG_IP_ROUTE_VERBOSE 2041#ifdef CONFIG_IP_ROUTE_VERBOSE
2012 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) 2042 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
2013 printk(KERN_WARNING "martian destination %u.%u.%u.%u from " 2043 printk(KERN_WARNING "martian destination " NIPQUAD_FMT " from "
2014 "%u.%u.%u.%u, dev %s\n", 2044 NIPQUAD_FMT ", dev %s\n",
2015 NIPQUAD(daddr), NIPQUAD(saddr), dev->name); 2045 NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
2016#endif 2046#endif
2017 2047
@@ -2040,25 +2070,25 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2040 int iif = dev->ifindex; 2070 int iif = dev->ifindex;
2041 struct net *net; 2071 struct net *net;
2042 2072
2043 net = dev->nd_net; 2073 net = dev_net(dev);
2044 tos &= IPTOS_RT_MASK; 2074 tos &= IPTOS_RT_MASK;
2045 hash = rt_hash(daddr, saddr, iif); 2075 hash = rt_hash(daddr, saddr, iif);
2046 2076
2047 rcu_read_lock(); 2077 rcu_read_lock();
2048 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 2078 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2049 rth = rcu_dereference(rth->u.dst.rt_next)) { 2079 rth = rcu_dereference(rth->u.dst.rt_next)) {
2050 if (rth->fl.fl4_dst == daddr && 2080 if (((rth->fl.fl4_dst ^ daddr) |
2051 rth->fl.fl4_src == saddr && 2081 (rth->fl.fl4_src ^ saddr) |
2052 rth->fl.iif == iif && 2082 (rth->fl.iif ^ iif) |
2053 rth->fl.oif == 0 && 2083 rth->fl.oif |
2084 (rth->fl.fl4_tos ^ tos)) == 0 &&
2054 rth->fl.mark == skb->mark && 2085 rth->fl.mark == skb->mark &&
2055 rth->fl.fl4_tos == tos && 2086 net_eq(dev_net(rth->u.dst.dev), net) &&
2056 rth->u.dst.dev->nd_net == net &&
2057 rth->rt_genid == atomic_read(&rt_genid)) { 2087 rth->rt_genid == atomic_read(&rt_genid)) {
2058 dst_use(&rth->u.dst, jiffies); 2088 dst_use(&rth->u.dst, jiffies);
2059 RT_CACHE_STAT_INC(in_hit); 2089 RT_CACHE_STAT_INC(in_hit);
2060 rcu_read_unlock(); 2090 rcu_read_unlock();
2061 skb->dst = (struct dst_entry*)rth; 2091 skb->rtable = rth;
2062 return 0; 2092 return 0;
2063 } 2093 }
2064 RT_CACHE_STAT_INC(in_hlist_search); 2094 RT_CACHE_STAT_INC(in_hlist_search);
@@ -2100,12 +2130,12 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2100 return ip_route_input_slow(skb, daddr, saddr, tos, dev); 2130 return ip_route_input_slow(skb, daddr, saddr, tos, dev);
2101} 2131}
2102 2132
2103static inline int __mkroute_output(struct rtable **result, 2133static int __mkroute_output(struct rtable **result,
2104 struct fib_result* res, 2134 struct fib_result *res,
2105 const struct flowi *fl, 2135 const struct flowi *fl,
2106 const struct flowi *oldflp, 2136 const struct flowi *oldflp,
2107 struct net_device *dev_out, 2137 struct net_device *dev_out,
2108 unsigned flags) 2138 unsigned flags)
2109{ 2139{
2110 struct rtable *rth; 2140 struct rtable *rth;
2111 struct in_device *in_dev; 2141 struct in_device *in_dev;
@@ -2220,12 +2250,12 @@ static inline int __mkroute_output(struct rtable **result,
2220 return err; 2250 return err;
2221} 2251}
2222 2252
2223static inline int ip_mkroute_output(struct rtable **rp, 2253static int ip_mkroute_output(struct rtable **rp,
2224 struct fib_result* res, 2254 struct fib_result *res,
2225 const struct flowi *fl, 2255 const struct flowi *fl,
2226 const struct flowi *oldflp, 2256 const struct flowi *oldflp,
2227 struct net_device *dev_out, 2257 struct net_device *dev_out,
2228 unsigned flags) 2258 unsigned flags)
2229{ 2259{
2230 struct rtable *rth = NULL; 2260 struct rtable *rth = NULL;
2231 int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags); 2261 int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
@@ -2455,7 +2485,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
2455 rth->fl.mark == flp->mark && 2485 rth->fl.mark == flp->mark &&
2456 !((rth->fl.fl4_tos ^ flp->fl4_tos) & 2486 !((rth->fl.fl4_tos ^ flp->fl4_tos) &
2457 (IPTOS_RT_MASK | RTO_ONLINK)) && 2487 (IPTOS_RT_MASK | RTO_ONLINK)) &&
2458 rth->u.dst.dev->nd_net == net && 2488 net_eq(dev_net(rth->u.dst.dev), net) &&
2459 rth->rt_genid == atomic_read(&rt_genid)) { 2489 rth->rt_genid == atomic_read(&rt_genid)) {
2460 dst_use(&rth->u.dst, jiffies); 2490 dst_use(&rth->u.dst, jiffies);
2461 RT_CACHE_STAT_INC(out_hit); 2491 RT_CACHE_STAT_INC(out_hit);
@@ -2487,7 +2517,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
2487}; 2517};
2488 2518
2489 2519
2490static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp, struct sock *sk) 2520static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp)
2491{ 2521{
2492 struct rtable *ort = *rp; 2522 struct rtable *ort = *rp;
2493 struct rtable *rt = (struct rtable *) 2523 struct rtable *rt = (struct rtable *)
@@ -2547,7 +2577,7 @@ int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
2547 err = __xfrm_lookup((struct dst_entry **)rp, flp, sk, 2577 err = __xfrm_lookup((struct dst_entry **)rp, flp, sk,
2548 flags ? XFRM_LOOKUP_WAIT : 0); 2578 flags ? XFRM_LOOKUP_WAIT : 0);
2549 if (err == -EREMOTE) 2579 if (err == -EREMOTE)
2550 err = ipv4_dst_blackhole(rp, flp, sk); 2580 err = ipv4_dst_blackhole(rp, flp);
2551 2581
2552 return err; 2582 return err;
2553 } 2583 }
@@ -2565,7 +2595,7 @@ int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
2565static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, 2595static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
2566 int nowait, unsigned int flags) 2596 int nowait, unsigned int flags)
2567{ 2597{
2568 struct rtable *rt = (struct rtable*)skb->dst; 2598 struct rtable *rt = skb->rtable;
2569 struct rtmsg *r; 2599 struct rtmsg *r;
2570 struct nlmsghdr *nlh; 2600 struct nlmsghdr *nlh;
2571 long expires; 2601 long expires;
@@ -2658,7 +2688,7 @@ nla_put_failure:
2658 2688
2659static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg) 2689static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
2660{ 2690{
2661 struct net *net = in_skb->sk->sk_net; 2691 struct net *net = sock_net(in_skb->sk);
2662 struct rtmsg *rtm; 2692 struct rtmsg *rtm;
2663 struct nlattr *tb[RTA_MAX+1]; 2693 struct nlattr *tb[RTA_MAX+1];
2664 struct rtable *rt = NULL; 2694 struct rtable *rt = NULL;
@@ -2668,9 +2698,6 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2668 int err; 2698 int err;
2669 struct sk_buff *skb; 2699 struct sk_buff *skb;
2670 2700
2671 if (net != &init_net)
2672 return -EINVAL;
2673
2674 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy); 2701 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2675 if (err < 0) 2702 if (err < 0)
2676 goto errout; 2703 goto errout;
@@ -2700,7 +2727,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2700 if (iif) { 2727 if (iif) {
2701 struct net_device *dev; 2728 struct net_device *dev;
2702 2729
2703 dev = __dev_get_by_index(&init_net, iif); 2730 dev = __dev_get_by_index(net, iif);
2704 if (dev == NULL) { 2731 if (dev == NULL) {
2705 err = -ENODEV; 2732 err = -ENODEV;
2706 goto errout_free; 2733 goto errout_free;
@@ -2712,7 +2739,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2712 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev); 2739 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2713 local_bh_enable(); 2740 local_bh_enable();
2714 2741
2715 rt = (struct rtable*) skb->dst; 2742 rt = skb->rtable;
2716 if (err == 0 && rt->u.dst.error) 2743 if (err == 0 && rt->u.dst.error)
2717 err = -rt->u.dst.error; 2744 err = -rt->u.dst.error;
2718 } else { 2745 } else {
@@ -2726,22 +2753,22 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2726 }, 2753 },
2727 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0, 2754 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
2728 }; 2755 };
2729 err = ip_route_output_key(&init_net, &rt, &fl); 2756 err = ip_route_output_key(net, &rt, &fl);
2730 } 2757 }
2731 2758
2732 if (err) 2759 if (err)
2733 goto errout_free; 2760 goto errout_free;
2734 2761
2735 skb->dst = &rt->u.dst; 2762 skb->rtable = rt;
2736 if (rtm->rtm_flags & RTM_F_NOTIFY) 2763 if (rtm->rtm_flags & RTM_F_NOTIFY)
2737 rt->rt_flags |= RTCF_NOTIFY; 2764 rt->rt_flags |= RTCF_NOTIFY;
2738 2765
2739 err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, 2766 err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
2740 RTM_NEWROUTE, 0, 0); 2767 RTM_NEWROUTE, 0, 0);
2741 if (err <= 0) 2768 if (err <= 0)
2742 goto errout_free; 2769 goto errout_free;
2743 2770
2744 err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid); 2771 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
2745errout: 2772errout:
2746 return err; 2773 return err;
2747 2774
@@ -2755,6 +2782,9 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2755 struct rtable *rt; 2782 struct rtable *rt;
2756 int h, s_h; 2783 int h, s_h;
2757 int idx, s_idx; 2784 int idx, s_idx;
2785 struct net *net;
2786
2787 net = sock_net(skb->sk);
2758 2788
2759 s_h = cb->args[0]; 2789 s_h = cb->args[0];
2760 if (s_h < 0) 2790 if (s_h < 0)
@@ -2764,7 +2794,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2764 rcu_read_lock_bh(); 2794 rcu_read_lock_bh();
2765 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt; 2795 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
2766 rt = rcu_dereference(rt->u.dst.rt_next), idx++) { 2796 rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
2767 if (idx < s_idx) 2797 if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
2768 continue; 2798 continue;
2769 if (rt->rt_genid != atomic_read(&rt_genid)) 2799 if (rt->rt_genid != atomic_read(&rt_genid))
2770 continue; 2800 continue;
@@ -3028,7 +3058,9 @@ int __init ip_rt_init(void)
3028 devinet_init(); 3058 devinet_init();
3029 ip_fib_init(); 3059 ip_fib_init();
3030 3060
3031 setup_timer(&rt_secret_timer, rt_secret_rebuild, 0); 3061 rt_secret_timer.function = rt_secret_rebuild;
3062 rt_secret_timer.data = 0;
3063 init_timer_deferrable(&rt_secret_timer);
3032 3064
3033 /* All the timers, started at system startup tend 3065 /* All the timers, started at system startup tend
3034 to synchronize. Perturb it a bit. 3066 to synchronize. Perturb it a bit.
@@ -3040,7 +3072,7 @@ int __init ip_rt_init(void)
3040 ip_rt_secret_interval; 3072 ip_rt_secret_interval;
3041 add_timer(&rt_secret_timer); 3073 add_timer(&rt_secret_timer);
3042 3074
3043 if (ip_rt_proc_init(&init_net)) 3075 if (ip_rt_proc_init())
3044 printk(KERN_ERR "Unable to create route proc files\n"); 3076 printk(KERN_ERR "Unable to create route proc files\n");
3045#ifdef CONFIG_XFRM 3077#ifdef CONFIG_XFRM
3046 xfrm_init(); 3078 xfrm_init();
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index f470fe4511db..73ba98921d64 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -10,8 +10,6 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * $Id: syncookies.c,v 1.18 2002/02/01 22:01:04 davem Exp $ 12 * $Id: syncookies.c,v 1.18 2002/02/01 22:01:04 davem Exp $
13 *
14 * Missing: IPv6 support.
15 */ 13 */
16 14
17#include <linux/tcp.h> 15#include <linux/tcp.h>
@@ -21,26 +19,33 @@
21#include <linux/kernel.h> 19#include <linux/kernel.h>
22#include <net/tcp.h> 20#include <net/tcp.h>
23 21
22/* Timestamps: lowest 9 bits store TCP options */
23#define TSBITS 9
24#define TSMASK (((__u32)1 << TSBITS) - 1)
25
24extern int sysctl_tcp_syncookies; 26extern int sysctl_tcp_syncookies;
25 27
26static __u32 syncookie_secret[2][16-3+SHA_DIGEST_WORDS]; 28__u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
29EXPORT_SYMBOL(syncookie_secret);
27 30
28static __init int init_syncookies(void) 31static __init int init_syncookies(void)
29{ 32{
30 get_random_bytes(syncookie_secret, sizeof(syncookie_secret)); 33 get_random_bytes(syncookie_secret, sizeof(syncookie_secret));
31 return 0; 34 return 0;
32} 35}
33module_init(init_syncookies); 36__initcall(init_syncookies);
34 37
35#define COOKIEBITS 24 /* Upper bits store count */ 38#define COOKIEBITS 24 /* Upper bits store count */
36#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) 39#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
37 40
41static DEFINE_PER_CPU(__u32, cookie_scratch)[16 + 5 + SHA_WORKSPACE_WORDS];
42
38static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, 43static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
39 u32 count, int c) 44 u32 count, int c)
40{ 45{
41 __u32 tmp[16 + 5 + SHA_WORKSPACE_WORDS]; 46 __u32 *tmp = __get_cpu_var(cookie_scratch);
42 47
43 memcpy(tmp + 3, syncookie_secret[c], sizeof(syncookie_secret[c])); 48 memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
44 tmp[0] = (__force u32)saddr; 49 tmp[0] = (__force u32)saddr;
45 tmp[1] = (__force u32)daddr; 50 tmp[1] = (__force u32)daddr;
46 tmp[2] = ((__force u32)sport << 16) + (__force u32)dport; 51 tmp[2] = ((__force u32)sport << 16) + (__force u32)dport;
@@ -50,6 +55,39 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
50 return tmp[17]; 55 return tmp[17];
51} 56}
52 57
58
59/*
60 * when syncookies are in effect and tcp timestamps are enabled we encode
61 * tcp options in the lowest 9 bits of the timestamp value that will be
62 * sent in the syn-ack.
63 * Since subsequent timestamps use the normal tcp_time_stamp value, we
64 * must make sure that the resulting initial timestamp is <= tcp_time_stamp.
65 */
66__u32 cookie_init_timestamp(struct request_sock *req)
67{
68 struct inet_request_sock *ireq;
69 u32 ts, ts_now = tcp_time_stamp;
70 u32 options = 0;
71
72 ireq = inet_rsk(req);
73 if (ireq->wscale_ok) {
74 options = ireq->snd_wscale;
75 options |= ireq->rcv_wscale << 4;
76 }
77 options |= ireq->sack_ok << 8;
78
79 ts = ts_now & ~TSMASK;
80 ts |= options;
81 if (ts > ts_now) {
82 ts >>= TSBITS;
83 ts--;
84 ts <<= TSBITS;
85 ts |= options;
86 }
87 return ts;
88}
89
90
53static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport, 91static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
54 __be16 dport, __u32 sseq, __u32 count, 92 __be16 dport, __u32 sseq, __u32 count,
55 __u32 data) 93 __u32 data)
@@ -184,6 +222,35 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
184 return child; 222 return child;
185} 223}
186 224
225
226/*
227 * when syncookies are in effect and tcp timestamps are enabled we stored
228 * additional tcp options in the timestamp.
229 * This extracts these options from the timestamp echo.
230 *
231 * The lowest 4 bits are for snd_wscale
232 * The next 4 lsb are for rcv_wscale
233 * The next lsb is for sack_ok
234 */
235void cookie_check_timestamp(struct tcp_options_received *tcp_opt)
236{
237 /* echoed timestamp, 9 lowest bits contain options */
238 u32 options = tcp_opt->rcv_tsecr & TSMASK;
239
240 tcp_opt->snd_wscale = options & 0xf;
241 options >>= 4;
242 tcp_opt->rcv_wscale = options & 0xf;
243
244 tcp_opt->sack_ok = (options >> 4) & 0x1;
245
246 if (tcp_opt->sack_ok)
247 tcp_sack_reset(tcp_opt);
248
249 if (tcp_opt->snd_wscale || tcp_opt->rcv_wscale)
250 tcp_opt->wscale_ok = 1;
251}
252EXPORT_SYMBOL(cookie_check_timestamp);
253
187struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, 254struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
188 struct ip_options *opt) 255 struct ip_options *opt)
189{ 256{
@@ -197,6 +264,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
197 int mss; 264 int mss;
198 struct rtable *rt; 265 struct rtable *rt;
199 __u8 rcv_wscale; 266 __u8 rcv_wscale;
267 struct tcp_options_received tcp_opt;
200 268
201 if (!sysctl_tcp_syncookies || !th->ack) 269 if (!sysctl_tcp_syncookies || !th->ack)
202 goto out; 270 goto out;
@@ -209,6 +277,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
209 277
210 NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV); 278 NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV);
211 279
280 /* check for timestamp cookie support */
281 memset(&tcp_opt, 0, sizeof(tcp_opt));
282 tcp_parse_options(skb, &tcp_opt, 0);
283
284 if (tcp_opt.saw_tstamp)
285 cookie_check_timestamp(&tcp_opt);
286
212 ret = NULL; 287 ret = NULL;
213 req = reqsk_alloc(&tcp_request_sock_ops); /* for safety */ 288 req = reqsk_alloc(&tcp_request_sock_ops); /* for safety */
214 if (!req) 289 if (!req)
@@ -227,6 +302,12 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
227 ireq->loc_addr = ip_hdr(skb)->daddr; 302 ireq->loc_addr = ip_hdr(skb)->daddr;
228 ireq->rmt_addr = ip_hdr(skb)->saddr; 303 ireq->rmt_addr = ip_hdr(skb)->saddr;
229 ireq->opt = NULL; 304 ireq->opt = NULL;
305 ireq->snd_wscale = tcp_opt.snd_wscale;
306 ireq->rcv_wscale = tcp_opt.rcv_wscale;
307 ireq->sack_ok = tcp_opt.sack_ok;
308 ireq->wscale_ok = tcp_opt.wscale_ok;
309 ireq->tstamp_ok = tcp_opt.saw_tstamp;
310 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
230 311
231 /* We throwed the options of the initial SYN away, so we hope 312 /* We throwed the options of the initial SYN away, so we hope
232 * the ACK carries the same options again (see RFC1122 4.2.3.8) 313 * the ACK carries the same options again (see RFC1122 4.2.3.8)
@@ -241,8 +322,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
241 } 322 }
242 } 323 }
243 324
244 ireq->snd_wscale = ireq->rcv_wscale = ireq->tstamp_ok = 0;
245 ireq->wscale_ok = ireq->sack_ok = 0;
246 req->expires = 0UL; 325 req->expires = 0UL;
247 req->retrans = 0; 326 req->retrans = 0;
248 327
@@ -271,11 +350,12 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
271 } 350 }
272 351
273 /* Try to redo what tcp_v4_send_synack did. */ 352 /* Try to redo what tcp_v4_send_synack did. */
274 req->window_clamp = dst_metric(&rt->u.dst, RTAX_WINDOW); 353 req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW);
354
275 tcp_select_initial_window(tcp_full_space(sk), req->mss, 355 tcp_select_initial_window(tcp_full_space(sk), req->mss,
276 &req->rcv_wnd, &req->window_clamp, 356 &req->rcv_wnd, &req->window_clamp,
277 0, &rcv_wscale); 357 ireq->wscale_ok, &rcv_wscale);
278 /* BTW win scale with syncookies is 0 by definition */ 358
279 ireq->rcv_wscale = rcv_wscale; 359 ireq->rcv_wscale = rcv_wscale;
280 360
281 ret = get_cookie_sock(sk, skb, req, &rt->u.dst); 361 ret = get_cookie_sock(sk, skb, req, &rt->u.dst);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 88286f35d1e2..c437f804ee38 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -404,38 +404,6 @@ static struct ctl_table ipv4_table[] = {
404 .strategy = &ipv4_sysctl_local_port_range, 404 .strategy = &ipv4_sysctl_local_port_range,
405 }, 405 },
406 { 406 {
407 .ctl_name = NET_IPV4_ICMP_ECHO_IGNORE_ALL,
408 .procname = "icmp_echo_ignore_all",
409 .data = &sysctl_icmp_echo_ignore_all,
410 .maxlen = sizeof(int),
411 .mode = 0644,
412 .proc_handler = &proc_dointvec
413 },
414 {
415 .ctl_name = NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS,
416 .procname = "icmp_echo_ignore_broadcasts",
417 .data = &sysctl_icmp_echo_ignore_broadcasts,
418 .maxlen = sizeof(int),
419 .mode = 0644,
420 .proc_handler = &proc_dointvec
421 },
422 {
423 .ctl_name = NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES,
424 .procname = "icmp_ignore_bogus_error_responses",
425 .data = &sysctl_icmp_ignore_bogus_error_responses,
426 .maxlen = sizeof(int),
427 .mode = 0644,
428 .proc_handler = &proc_dointvec
429 },
430 {
431 .ctl_name = NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR,
432 .procname = "icmp_errors_use_inbound_ifaddr",
433 .data = &sysctl_icmp_errors_use_inbound_ifaddr,
434 .maxlen = sizeof(int),
435 .mode = 0644,
436 .proc_handler = &proc_dointvec
437 },
438 {
439 .ctl_name = NET_IPV4_ROUTE, 407 .ctl_name = NET_IPV4_ROUTE,
440 .procname = "route", 408 .procname = "route",
441 .maxlen = 0, 409 .maxlen = 0,
@@ -586,22 +554,6 @@ static struct ctl_table ipv4_table[] = {
586 .proc_handler = &proc_dointvec 554 .proc_handler = &proc_dointvec
587 }, 555 },
588 { 556 {
589 .ctl_name = NET_IPV4_ICMP_RATELIMIT,
590 .procname = "icmp_ratelimit",
591 .data = &sysctl_icmp_ratelimit,
592 .maxlen = sizeof(int),
593 .mode = 0644,
594 .proc_handler = &proc_dointvec
595 },
596 {
597 .ctl_name = NET_IPV4_ICMP_RATEMASK,
598 .procname = "icmp_ratemask",
599 .data = &sysctl_icmp_ratemask,
600 .maxlen = sizeof(int),
601 .mode = 0644,
602 .proc_handler = &proc_dointvec
603 },
604 {
605 .ctl_name = NET_TCP_TW_REUSE, 557 .ctl_name = NET_TCP_TW_REUSE,
606 .procname = "tcp_tw_reuse", 558 .procname = "tcp_tw_reuse",
607 .data = &sysctl_tcp_tw_reuse, 559 .data = &sysctl_tcp_tw_reuse,
@@ -804,6 +756,58 @@ static struct ctl_table ipv4_table[] = {
804 { .ctl_name = 0 } 756 { .ctl_name = 0 }
805}; 757};
806 758
759static struct ctl_table ipv4_net_table[] = {
760 {
761 .ctl_name = NET_IPV4_ICMP_ECHO_IGNORE_ALL,
762 .procname = "icmp_echo_ignore_all",
763 .data = &init_net.ipv4.sysctl_icmp_echo_ignore_all,
764 .maxlen = sizeof(int),
765 .mode = 0644,
766 .proc_handler = &proc_dointvec
767 },
768 {
769 .ctl_name = NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS,
770 .procname = "icmp_echo_ignore_broadcasts",
771 .data = &init_net.ipv4.sysctl_icmp_echo_ignore_broadcasts,
772 .maxlen = sizeof(int),
773 .mode = 0644,
774 .proc_handler = &proc_dointvec
775 },
776 {
777 .ctl_name = NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES,
778 .procname = "icmp_ignore_bogus_error_responses",
779 .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
780 .maxlen = sizeof(int),
781 .mode = 0644,
782 .proc_handler = &proc_dointvec
783 },
784 {
785 .ctl_name = NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR,
786 .procname = "icmp_errors_use_inbound_ifaddr",
787 .data = &init_net.ipv4.sysctl_icmp_errors_use_inbound_ifaddr,
788 .maxlen = sizeof(int),
789 .mode = 0644,
790 .proc_handler = &proc_dointvec
791 },
792 {
793 .ctl_name = NET_IPV4_ICMP_RATELIMIT,
794 .procname = "icmp_ratelimit",
795 .data = &init_net.ipv4.sysctl_icmp_ratelimit,
796 .maxlen = sizeof(int),
797 .mode = 0644,
798 .proc_handler = &proc_dointvec
799 },
800 {
801 .ctl_name = NET_IPV4_ICMP_RATEMASK,
802 .procname = "icmp_ratemask",
803 .data = &init_net.ipv4.sysctl_icmp_ratemask,
804 .maxlen = sizeof(int),
805 .mode = 0644,
806 .proc_handler = &proc_dointvec
807 },
808 { }
809};
810
807struct ctl_path net_ipv4_ctl_path[] = { 811struct ctl_path net_ipv4_ctl_path[] = {
808 { .procname = "net", .ctl_name = CTL_NET, }, 812 { .procname = "net", .ctl_name = CTL_NET, },
809 { .procname = "ipv4", .ctl_name = NET_IPV4, }, 813 { .procname = "ipv4", .ctl_name = NET_IPV4, },
@@ -811,12 +815,72 @@ struct ctl_path net_ipv4_ctl_path[] = {
811}; 815};
812EXPORT_SYMBOL_GPL(net_ipv4_ctl_path); 816EXPORT_SYMBOL_GPL(net_ipv4_ctl_path);
813 817
818static __net_init int ipv4_sysctl_init_net(struct net *net)
819{
820 struct ctl_table *table;
821
822 table = ipv4_net_table;
823 if (net != &init_net) {
824 table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
825 if (table == NULL)
826 goto err_alloc;
827
828 table[0].data =
829 &net->ipv4.sysctl_icmp_echo_ignore_all;
830 table[1].data =
831 &net->ipv4.sysctl_icmp_echo_ignore_broadcasts;
832 table[2].data =
833 &net->ipv4.sysctl_icmp_ignore_bogus_error_responses;
834 table[3].data =
835 &net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr;
836 table[4].data =
837 &net->ipv4.sysctl_icmp_ratelimit;
838 table[5].data =
839 &net->ipv4.sysctl_icmp_ratemask;
840 }
841
842 net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
843 net_ipv4_ctl_path, table);
844 if (net->ipv4.ipv4_hdr == NULL)
845 goto err_reg;
846
847 return 0;
848
849err_reg:
850 if (net != &init_net)
851 kfree(table);
852err_alloc:
853 return -ENOMEM;
854}
855
856static __net_exit void ipv4_sysctl_exit_net(struct net *net)
857{
858 struct ctl_table *table;
859
860 table = net->ipv4.ipv4_hdr->ctl_table_arg;
861 unregister_net_sysctl_table(net->ipv4.ipv4_hdr);
862 kfree(table);
863}
864
865static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
866 .init = ipv4_sysctl_init_net,
867 .exit = ipv4_sysctl_exit_net,
868};
869
814static __init int sysctl_ipv4_init(void) 870static __init int sysctl_ipv4_init(void)
815{ 871{
816 struct ctl_table_header *hdr; 872 struct ctl_table_header *hdr;
817 873
818 hdr = register_sysctl_paths(net_ipv4_ctl_path, ipv4_table); 874 hdr = register_sysctl_paths(net_ipv4_ctl_path, ipv4_table);
819 return hdr == NULL ? -ENOMEM : 0; 875 if (hdr == NULL)
876 return -ENOMEM;
877
878 if (register_pernet_subsys(&ipv4_sysctl_ops)) {
879 unregister_sysctl_table(hdr);
880 return -ENOMEM;
881 }
882
883 return 0;
820} 884}
821 885
822__initcall(sysctl_ipv4_init); 886__initcall(sysctl_ipv4_init);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 39b629ac2404..58ac838bf460 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2105,15 +2105,12 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2105 break; 2105 break;
2106 2106
2107 case TCP_DEFER_ACCEPT: 2107 case TCP_DEFER_ACCEPT:
2108 icsk->icsk_accept_queue.rskq_defer_accept = 0; 2108 if (val < 0) {
2109 if (val > 0) { 2109 err = -EINVAL;
2110 /* Translate value in seconds to number of 2110 } else {
2111 * retransmits */ 2111 if (val > MAX_TCP_ACCEPT_DEFERRED)
2112 while (icsk->icsk_accept_queue.rskq_defer_accept < 32 && 2112 val = MAX_TCP_ACCEPT_DEFERRED;
2113 val > ((TCP_TIMEOUT_INIT / HZ) << 2113 icsk->icsk_accept_queue.rskq_defer_accept = val;
2114 icsk->icsk_accept_queue.rskq_defer_accept))
2115 icsk->icsk_accept_queue.rskq_defer_accept++;
2116 icsk->icsk_accept_queue.rskq_defer_accept++;
2117 } 2114 }
2118 break; 2115 break;
2119 2116
@@ -2295,8 +2292,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
2295 val = (val ? : sysctl_tcp_fin_timeout) / HZ; 2292 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2296 break; 2293 break;
2297 case TCP_DEFER_ACCEPT: 2294 case TCP_DEFER_ACCEPT:
2298 val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 : 2295 val = icsk->icsk_accept_queue.rskq_defer_accept;
2299 ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
2300 break; 2296 break;
2301 case TCP_WINDOW_CLAMP: 2297 case TCP_WINDOW_CLAMP:
2302 val = tp->window_clamp; 2298 val = tp->window_clamp;
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 3aa0b23c1ea0..eb5b9854c8c7 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -1,12 +1,13 @@
1/* 1/*
2 * TCP CUBIC: Binary Increase Congestion control for TCP v2.1 2 * TCP CUBIC: Binary Increase Congestion control for TCP v2.2
3 * 3 * Home page:
4 * http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC
4 * This is from the implementation of CUBIC TCP in 5 * This is from the implementation of CUBIC TCP in
5 * Injong Rhee, Lisong Xu. 6 * Injong Rhee, Lisong Xu.
6 * "CUBIC: A New TCP-Friendly High-Speed TCP Variant 7 * "CUBIC: A New TCP-Friendly High-Speed TCP Variant
7 * in PFLDnet 2005 8 * in PFLDnet 2005
8 * Available from: 9 * Available from:
9 * http://www.csc.ncsu.edu/faculty/rhee/export/bitcp/cubic-paper.pdf 10 * http://netsrv.csc.ncsu.edu/export/cubic-paper.pdf
10 * 11 *
11 * Unless CUBIC is enabled and congestion window is large 12 * Unless CUBIC is enabled and congestion window is large
12 * this behaves the same as the original Reno. 13 * this behaves the same as the original Reno.
@@ -20,15 +21,10 @@
20#define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation 21#define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation
21 * max_cwnd = snd_cwnd * beta 22 * max_cwnd = snd_cwnd * beta
22 */ 23 */
23#define BICTCP_B 4 /*
24 * In binary search,
25 * go to point (max+min)/N
26 */
27#define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */ 24#define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */
28 25
29static int fast_convergence __read_mostly = 1; 26static int fast_convergence __read_mostly = 1;
30static int max_increment __read_mostly = 16; 27static int beta __read_mostly = 717; /* = 717/1024 (BICTCP_BETA_SCALE) */
31static int beta __read_mostly = 819; /* = 819/1024 (BICTCP_BETA_SCALE) */
32static int initial_ssthresh __read_mostly; 28static int initial_ssthresh __read_mostly;
33static int bic_scale __read_mostly = 41; 29static int bic_scale __read_mostly = 41;
34static int tcp_friendliness __read_mostly = 1; 30static int tcp_friendliness __read_mostly = 1;
@@ -40,9 +36,7 @@ static u64 cube_factor __read_mostly;
40/* Note parameters that are used for precomputing scale factors are read-only */ 36/* Note parameters that are used for precomputing scale factors are read-only */
41module_param(fast_convergence, int, 0644); 37module_param(fast_convergence, int, 0644);
42MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence"); 38MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence");
43module_param(max_increment, int, 0644); 39module_param(beta, int, 0644);
44MODULE_PARM_DESC(max_increment, "Limit on increment allowed during binary search");
45module_param(beta, int, 0444);
46MODULE_PARM_DESC(beta, "beta for multiplicative increase"); 40MODULE_PARM_DESC(beta, "beta for multiplicative increase");
47module_param(initial_ssthresh, int, 0644); 41module_param(initial_ssthresh, int, 0644);
48MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold"); 42MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold");
@@ -145,7 +139,7 @@ static u32 cubic_root(u64 a)
145static inline void bictcp_update(struct bictcp *ca, u32 cwnd) 139static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
146{ 140{
147 u64 offs; 141 u64 offs;
148 u32 delta, t, bic_target, min_cnt, max_cnt; 142 u32 delta, t, bic_target, max_cnt;
149 143
150 ca->ack_cnt++; /* count the number of ACKs */ 144 ca->ack_cnt++; /* count the number of ACKs */
151 145
@@ -211,19 +205,6 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
211 ca->cnt = 100 * cwnd; /* very small increment*/ 205 ca->cnt = 100 * cwnd; /* very small increment*/
212 } 206 }
213 207
214 if (ca->delay_min > 0) {
215 /* max increment = Smax * rtt / 0.1 */
216 min_cnt = (cwnd * HZ * 8)/(10 * max_increment * ca->delay_min);
217
218 /* use concave growth when the target is above the origin */
219 if (ca->cnt < min_cnt && t >= ca->bic_K)
220 ca->cnt = min_cnt;
221 }
222
223 /* slow start and low utilization */
224 if (ca->loss_cwnd == 0) /* could be aggressive in slow start */
225 ca->cnt = 50;
226
227 /* TCP Friendly */ 208 /* TCP Friendly */
228 if (tcp_friendliness) { 209 if (tcp_friendliness) {
229 u32 scale = beta_scale; 210 u32 scale = beta_scale;
@@ -391,4 +372,4 @@ module_exit(cubictcp_unregister);
391MODULE_AUTHOR("Sangtae Ha, Stephen Hemminger"); 372MODULE_AUTHOR("Sangtae Ha, Stephen Hemminger");
392MODULE_LICENSE("GPL"); 373MODULE_LICENSE("GPL");
393MODULE_DESCRIPTION("CUBIC TCP"); 374MODULE_DESCRIPTION("CUBIC TCP");
394MODULE_VERSION("2.1"); 375MODULE_VERSION("2.2");
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bbb7d88a16b4..cdc051bfdb4d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2309,12 +2309,25 @@ static void DBGUNDO(struct sock *sk, const char *msg)
2309 struct tcp_sock *tp = tcp_sk(sk); 2309 struct tcp_sock *tp = tcp_sk(sk);
2310 struct inet_sock *inet = inet_sk(sk); 2310 struct inet_sock *inet = inet_sk(sk);
2311 2311
2312 printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n", 2312 if (sk->sk_family == AF_INET) {
2313 msg, 2313 printk(KERN_DEBUG "Undo %s " NIPQUAD_FMT "/%u c%u l%u ss%u/%u p%u\n",
2314 NIPQUAD(inet->daddr), ntohs(inet->dport), 2314 msg,
2315 tp->snd_cwnd, tcp_left_out(tp), 2315 NIPQUAD(inet->daddr), ntohs(inet->dport),
2316 tp->snd_ssthresh, tp->prior_ssthresh, 2316 tp->snd_cwnd, tcp_left_out(tp),
2317 tp->packets_out); 2317 tp->snd_ssthresh, tp->prior_ssthresh,
2318 tp->packets_out);
2319 }
2320#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2321 else if (sk->sk_family == AF_INET6) {
2322 struct ipv6_pinfo *np = inet6_sk(sk);
2323 printk(KERN_DEBUG "Undo %s " NIP6_FMT "/%u c%u l%u ss%u/%u p%u\n",
2324 msg,
2325 NIP6(np->daddr), ntohs(inet->dport),
2326 tp->snd_cwnd, tcp_left_out(tp),
2327 tp->snd_ssthresh, tp->prior_ssthresh,
2328 tp->packets_out);
2329 }
2330#endif
2318} 2331}
2319#else 2332#else
2320#define DBGUNDO(x...) do { } while (0) 2333#define DBGUNDO(x...) do { } while (0)
@@ -3592,7 +3605,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
3592 * cases we should never reach this piece of code. 3605 * cases we should never reach this piece of code.
3593 */ 3606 */
3594 printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n", 3607 printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n",
3595 __FUNCTION__, sk->sk_state); 3608 __func__, sk->sk_state);
3596 break; 3609 break;
3597 } 3610 }
3598 3611
@@ -4012,7 +4025,7 @@ drop:
4012 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 4025 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
4013 4026
4014 if (seq == TCP_SKB_CB(skb1)->end_seq) { 4027 if (seq == TCP_SKB_CB(skb1)->end_seq) {
4015 __skb_append(skb1, skb, &tp->out_of_order_queue); 4028 __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
4016 4029
4017 if (!tp->rx_opt.num_sacks || 4030 if (!tp->rx_opt.num_sacks ||
4018 tp->selective_acks[0].end_seq != seq) 4031 tp->selective_acks[0].end_seq != seq)
@@ -4508,6 +4521,49 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
4508 } 4521 }
4509} 4522}
4510 4523
4524static int tcp_defer_accept_check(struct sock *sk)
4525{
4526 struct tcp_sock *tp = tcp_sk(sk);
4527
4528 if (tp->defer_tcp_accept.request) {
4529 int queued_data = tp->rcv_nxt - tp->copied_seq;
4530 int hasfin = !skb_queue_empty(&sk->sk_receive_queue) ?
4531 tcp_hdr((struct sk_buff *)
4532 sk->sk_receive_queue.prev)->fin : 0;
4533
4534 if (queued_data && hasfin)
4535 queued_data--;
4536
4537 if (queued_data &&
4538 tp->defer_tcp_accept.listen_sk->sk_state == TCP_LISTEN) {
4539 if (sock_flag(sk, SOCK_KEEPOPEN)) {
4540 inet_csk_reset_keepalive_timer(sk,
4541 keepalive_time_when(tp));
4542 } else {
4543 inet_csk_delete_keepalive_timer(sk);
4544 }
4545
4546 inet_csk_reqsk_queue_add(
4547 tp->defer_tcp_accept.listen_sk,
4548 tp->defer_tcp_accept.request,
4549 sk);
4550
4551 tp->defer_tcp_accept.listen_sk->sk_data_ready(
4552 tp->defer_tcp_accept.listen_sk, 0);
4553
4554 sock_put(tp->defer_tcp_accept.listen_sk);
4555 sock_put(sk);
4556 tp->defer_tcp_accept.listen_sk = NULL;
4557 tp->defer_tcp_accept.request = NULL;
4558 } else if (hasfin ||
4559 tp->defer_tcp_accept.listen_sk->sk_state != TCP_LISTEN) {
4560 tcp_reset(sk);
4561 return -1;
4562 }
4563 }
4564 return 0;
4565}
4566
4511static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) 4567static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
4512{ 4568{
4513 struct tcp_sock *tp = tcp_sk(sk); 4569 struct tcp_sock *tp = tcp_sk(sk);
@@ -4868,6 +4924,9 @@ step5:
4868 4924
4869 tcp_data_snd_check(sk); 4925 tcp_data_snd_check(sk);
4870 tcp_ack_snd_check(sk); 4926 tcp_ack_snd_check(sk);
4927
4928 if (tcp_defer_accept_check(sk))
4929 return -1;
4871 return 0; 4930 return 0;
4872 4931
4873csum_error: 4932csum_error:
@@ -5387,6 +5446,7 @@ discard:
5387 5446
5388EXPORT_SYMBOL(sysctl_tcp_ecn); 5447EXPORT_SYMBOL(sysctl_tcp_ecn);
5389EXPORT_SYMBOL(sysctl_tcp_reordering); 5448EXPORT_SYMBOL(sysctl_tcp_reordering);
5449EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
5390EXPORT_SYMBOL(tcp_parse_options); 5450EXPORT_SYMBOL(tcp_parse_options);
5391EXPORT_SYMBOL(tcp_rcv_established); 5451EXPORT_SYMBOL(tcp_rcv_established);
5392EXPORT_SYMBOL(tcp_rcv_state_process); 5452EXPORT_SYMBOL(tcp_rcv_state_process);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 00156bf421ca..776615180b93 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -88,9 +88,6 @@ int sysctl_tcp_low_latency __read_mostly;
88/* Check TCP sequence numbers in ICMP packets. */ 88/* Check TCP sequence numbers in ICMP packets. */
89#define ICMP_MIN_LENGTH 8 89#define ICMP_MIN_LENGTH 8
90 90
91/* Socket used for sending RSTs */
92static struct socket *tcp_socket __read_mostly;
93
94void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); 91void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
95 92
96#ifdef CONFIG_TCP_MD5SIG 93#ifdef CONFIG_TCP_MD5SIG
@@ -353,7 +350,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
353 return; 350 return;
354 } 351 }
355 352
356 sk = inet_lookup(skb->dev->nd_net, &tcp_hashinfo, iph->daddr, th->dest, 353 sk = inet_lookup(dev_net(skb->dev), &tcp_hashinfo, iph->daddr, th->dest,
357 iph->saddr, th->source, inet_iif(skb)); 354 iph->saddr, th->source, inet_iif(skb));
358 if (!sk) { 355 if (!sk) {
359 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 356 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
@@ -552,7 +549,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
552 if (th->rst) 549 if (th->rst)
553 return; 550 return;
554 551
555 if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL) 552 if (skb->rtable->rt_type != RTN_LOCAL)
556 return; 553 return;
557 554
558 /* Swap the send and the receive. */ 555 /* Swap the send and the receive. */
@@ -598,7 +595,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
598 sizeof(struct tcphdr), IPPROTO_TCP, 0); 595 sizeof(struct tcphdr), IPPROTO_TCP, 0);
599 arg.csumoffset = offsetof(struct tcphdr, check) / 2; 596 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
600 597
601 ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len); 598 ip_send_reply(dev_net(skb->dst->dev)->ipv4.tcp_sock, skb,
599 &arg, arg.iov[0].iov_len);
602 600
603 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); 601 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
604 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS); 602 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
@@ -693,7 +691,8 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
693 if (twsk) 691 if (twsk)
694 arg.bound_dev_if = twsk->tw_sk.tw_bound_dev_if; 692 arg.bound_dev_if = twsk->tw_sk.tw_bound_dev_if;
695 693
696 ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len); 694 ip_send_reply(dev_net(skb->dev)->ipv4.tcp_sock, skb,
695 &arg, arg.iov[0].iov_len);
697 696
698 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); 697 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
699} 698}
@@ -723,8 +722,8 @@ static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
723 * This still operates on a request_sock only, not on a big 722 * This still operates on a request_sock only, not on a big
724 * socket. 723 * socket.
725 */ 724 */
726static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req, 725static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
727 struct dst_entry *dst) 726 struct dst_entry *dst)
728{ 727{
729 const struct inet_request_sock *ireq = inet_rsk(req); 728 const struct inet_request_sock *ireq = inet_rsk(req);
730 int err = -1; 729 int err = -1;
@@ -732,7 +731,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
732 731
733 /* First, grab a route. */ 732 /* First, grab a route. */
734 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL) 733 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
735 goto out; 734 return -1;
736 735
737 skb = tcp_make_synack(sk, dst, req); 736 skb = tcp_make_synack(sk, dst, req);
738 737
@@ -751,11 +750,15 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
751 err = net_xmit_eval(err); 750 err = net_xmit_eval(err);
752 } 751 }
753 752
754out:
755 dst_release(dst); 753 dst_release(dst);
756 return err; 754 return err;
757} 755}
758 756
757static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req)
758{
759 return __tcp_v4_send_synack(sk, req, NULL);
760}
761
759/* 762/*
760 * IPv4 request_sock destructor. 763 * IPv4 request_sock destructor.
761 */ 764 */
@@ -1258,8 +1261,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1258#endif 1261#endif
1259 1262
1260 /* Never answer to SYNs send to broadcast or multicast */ 1263 /* Never answer to SYNs send to broadcast or multicast */
1261 if (((struct rtable *)skb->dst)->rt_flags & 1264 if (skb->rtable->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1262 (RTCF_BROADCAST | RTCF_MULTICAST))
1263 goto drop; 1265 goto drop;
1264 1266
1265 /* TW buckets are converted to open requests without 1267 /* TW buckets are converted to open requests without
@@ -1297,10 +1299,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1297 1299
1298 tcp_parse_options(skb, &tmp_opt, 0); 1300 tcp_parse_options(skb, &tmp_opt, 0);
1299 1301
1300 if (want_cookie) { 1302 if (want_cookie && !tmp_opt.saw_tstamp)
1301 tcp_clear_options(&tmp_opt); 1303 tcp_clear_options(&tmp_opt);
1302 tmp_opt.saw_tstamp = 0;
1303 }
1304 1304
1305 if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) { 1305 if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
1306 /* Some OSes (unknown ones, but I see them on web server, which 1306 /* Some OSes (unknown ones, but I see them on web server, which
@@ -1328,6 +1328,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1328 if (want_cookie) { 1328 if (want_cookie) {
1329#ifdef CONFIG_SYN_COOKIES 1329#ifdef CONFIG_SYN_COOKIES
1330 syn_flood_warning(skb); 1330 syn_flood_warning(skb);
1331 req->cookie_ts = tmp_opt.tstamp_ok;
1331#endif 1332#endif
1332 isn = cookie_v4_init_sequence(sk, skb, &req->mss); 1333 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1333 } else if (!isn) { 1334 } else if (!isn) {
@@ -1351,8 +1352,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1351 (s32)(peer->tcp_ts - req->ts_recent) > 1352 (s32)(peer->tcp_ts - req->ts_recent) >
1352 TCP_PAWS_WINDOW) { 1353 TCP_PAWS_WINDOW) {
1353 NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED); 1354 NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
1354 dst_release(dst); 1355 goto drop_and_release;
1355 goto drop_and_free;
1356 } 1356 }
1357 } 1357 }
1358 /* Kill the following clause, if you dislike this way. */ 1358 /* Kill the following clause, if you dislike this way. */
@@ -1369,27 +1369,24 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1369 * to the moment of synflood. 1369 * to the moment of synflood.
1370 */ 1370 */
1371 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open " 1371 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
1372 "request from %u.%u.%u.%u/%u\n", 1372 "request from " NIPQUAD_FMT "/%u\n",
1373 NIPQUAD(saddr), 1373 NIPQUAD(saddr),
1374 ntohs(tcp_hdr(skb)->source)); 1374 ntohs(tcp_hdr(skb)->source));
1375 dst_release(dst); 1375 goto drop_and_release;
1376 goto drop_and_free;
1377 } 1376 }
1378 1377
1379 isn = tcp_v4_init_sequence(skb); 1378 isn = tcp_v4_init_sequence(skb);
1380 } 1379 }
1381 tcp_rsk(req)->snt_isn = isn; 1380 tcp_rsk(req)->snt_isn = isn;
1382 1381
1383 if (tcp_v4_send_synack(sk, req, dst)) 1382 if (__tcp_v4_send_synack(sk, req, dst) || want_cookie)
1384 goto drop_and_free; 1383 goto drop_and_free;
1385 1384
1386 if (want_cookie) { 1385 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1387 reqsk_free(req);
1388 } else {
1389 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1390 }
1391 return 0; 1386 return 0;
1392 1387
1388drop_and_release:
1389 dst_release(dst);
1393drop_and_free: 1390drop_and_free:
1394 reqsk_free(req); 1391 reqsk_free(req);
1395drop: 1392drop:
@@ -1487,7 +1484,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1487 if (req) 1484 if (req)
1488 return tcp_check_req(sk, skb, req, prev); 1485 return tcp_check_req(sk, skb, req, prev);
1489 1486
1490 nsk = inet_lookup_established(sk->sk_net, &tcp_hashinfo, iph->saddr, 1487 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1491 th->source, iph->daddr, th->dest, inet_iif(skb)); 1488 th->source, iph->daddr, th->dest, inet_iif(skb));
1492 1489
1493 if (nsk) { 1490 if (nsk) {
@@ -1645,7 +1642,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
1645 TCP_SKB_CB(skb)->flags = iph->tos; 1642 TCP_SKB_CB(skb)->flags = iph->tos;
1646 TCP_SKB_CB(skb)->sacked = 0; 1643 TCP_SKB_CB(skb)->sacked = 0;
1647 1644
1648 sk = __inet_lookup(skb->dev->nd_net, &tcp_hashinfo, iph->saddr, 1645 sk = __inet_lookup(dev_net(skb->dev), &tcp_hashinfo, iph->saddr,
1649 th->source, iph->daddr, th->dest, inet_iif(skb)); 1646 th->source, iph->daddr, th->dest, inet_iif(skb));
1650 if (!sk) 1647 if (!sk)
1651 goto no_tcp_socket; 1648 goto no_tcp_socket;
@@ -1719,7 +1716,7 @@ do_time_wait:
1719 } 1716 }
1720 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { 1717 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1721 case TCP_TW_SYN: { 1718 case TCP_TW_SYN: {
1722 struct sock *sk2 = inet_lookup_listener(skb->dev->nd_net, 1719 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1723 &tcp_hashinfo, 1720 &tcp_hashinfo,
1724 iph->daddr, th->dest, 1721 iph->daddr, th->dest,
1725 inet_iif(skb)); 1722 inet_iif(skb));
@@ -1921,6 +1918,14 @@ int tcp_v4_destroy_sock(struct sock *sk)
1921 sk->sk_sndmsg_page = NULL; 1918 sk->sk_sndmsg_page = NULL;
1922 } 1919 }
1923 1920
1921 if (tp->defer_tcp_accept.request) {
1922 reqsk_free(tp->defer_tcp_accept.request);
1923 sock_put(tp->defer_tcp_accept.listen_sk);
1924 sock_put(sk);
1925 tp->defer_tcp_accept.listen_sk = NULL;
1926 tp->defer_tcp_accept.request = NULL;
1927 }
1928
1924 atomic_dec(&tcp_sockets_allocated); 1929 atomic_dec(&tcp_sockets_allocated);
1925 1930
1926 return 0; 1931 return 0;
@@ -1949,6 +1954,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
1949 struct hlist_node *node; 1954 struct hlist_node *node;
1950 struct sock *sk = cur; 1955 struct sock *sk = cur;
1951 struct tcp_iter_state* st = seq->private; 1956 struct tcp_iter_state* st = seq->private;
1957 struct net *net = seq_file_net(seq);
1952 1958
1953 if (!sk) { 1959 if (!sk) {
1954 st->bucket = 0; 1960 st->bucket = 0;
@@ -1965,7 +1971,8 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
1965 req = req->dl_next; 1971 req = req->dl_next;
1966 while (1) { 1972 while (1) {
1967 while (req) { 1973 while (req) {
1968 if (req->rsk_ops->family == st->family) { 1974 if (req->rsk_ops->family == st->family &&
1975 net_eq(sock_net(req->sk), net)) {
1969 cur = req; 1976 cur = req;
1970 goto out; 1977 goto out;
1971 } 1978 }
@@ -1989,7 +1996,7 @@ get_req:
1989 } 1996 }
1990get_sk: 1997get_sk:
1991 sk_for_each_from(sk, node) { 1998 sk_for_each_from(sk, node) {
1992 if (sk->sk_family == st->family) { 1999 if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
1993 cur = sk; 2000 cur = sk;
1994 goto out; 2001 goto out;
1995 } 2002 }
@@ -2028,6 +2035,7 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2028static void *established_get_first(struct seq_file *seq) 2035static void *established_get_first(struct seq_file *seq)
2029{ 2036{
2030 struct tcp_iter_state* st = seq->private; 2037 struct tcp_iter_state* st = seq->private;
2038 struct net *net = seq_file_net(seq);
2031 void *rc = NULL; 2039 void *rc = NULL;
2032 2040
2033 for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) { 2041 for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
@@ -2038,7 +2046,8 @@ static void *established_get_first(struct seq_file *seq)
2038 2046
2039 read_lock_bh(lock); 2047 read_lock_bh(lock);
2040 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { 2048 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2041 if (sk->sk_family != st->family) { 2049 if (sk->sk_family != st->family ||
2050 !net_eq(sock_net(sk), net)) {
2042 continue; 2051 continue;
2043 } 2052 }
2044 rc = sk; 2053 rc = sk;
@@ -2047,7 +2056,8 @@ static void *established_get_first(struct seq_file *seq)
2047 st->state = TCP_SEQ_STATE_TIME_WAIT; 2056 st->state = TCP_SEQ_STATE_TIME_WAIT;
2048 inet_twsk_for_each(tw, node, 2057 inet_twsk_for_each(tw, node,
2049 &tcp_hashinfo.ehash[st->bucket].twchain) { 2058 &tcp_hashinfo.ehash[st->bucket].twchain) {
2050 if (tw->tw_family != st->family) { 2059 if (tw->tw_family != st->family ||
2060 !net_eq(twsk_net(tw), net)) {
2051 continue; 2061 continue;
2052 } 2062 }
2053 rc = tw; 2063 rc = tw;
@@ -2066,6 +2076,7 @@ static void *established_get_next(struct seq_file *seq, void *cur)
2066 struct inet_timewait_sock *tw; 2076 struct inet_timewait_sock *tw;
2067 struct hlist_node *node; 2077 struct hlist_node *node;
2068 struct tcp_iter_state* st = seq->private; 2078 struct tcp_iter_state* st = seq->private;
2079 struct net *net = seq_file_net(seq);
2069 2080
2070 ++st->num; 2081 ++st->num;
2071 2082
@@ -2073,7 +2084,7 @@ static void *established_get_next(struct seq_file *seq, void *cur)
2073 tw = cur; 2084 tw = cur;
2074 tw = tw_next(tw); 2085 tw = tw_next(tw);
2075get_tw: 2086get_tw:
2076 while (tw && tw->tw_family != st->family) { 2087 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2077 tw = tw_next(tw); 2088 tw = tw_next(tw);
2078 } 2089 }
2079 if (tw) { 2090 if (tw) {
@@ -2094,7 +2105,7 @@ get_tw:
2094 sk = sk_next(sk); 2105 sk = sk_next(sk);
2095 2106
2096 sk_for_each_from(sk, node) { 2107 sk_for_each_from(sk, node) {
2097 if (sk->sk_family == st->family) 2108 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2098 goto found; 2109 goto found;
2099 } 2110 }
2100 2111
@@ -2200,48 +2211,37 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
2200static int tcp_seq_open(struct inode *inode, struct file *file) 2211static int tcp_seq_open(struct inode *inode, struct file *file)
2201{ 2212{
2202 struct tcp_seq_afinfo *afinfo = PDE(inode)->data; 2213 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2203 struct seq_file *seq;
2204 struct tcp_iter_state *s; 2214 struct tcp_iter_state *s;
2205 int rc; 2215 int err;
2206 2216
2207 if (unlikely(afinfo == NULL)) 2217 if (unlikely(afinfo == NULL))
2208 return -EINVAL; 2218 return -EINVAL;
2209 2219
2210 s = kzalloc(sizeof(*s), GFP_KERNEL); 2220 err = seq_open_net(inode, file, &afinfo->seq_ops,
2211 if (!s) 2221 sizeof(struct tcp_iter_state));
2212 return -ENOMEM; 2222 if (err < 0)
2223 return err;
2224
2225 s = ((struct seq_file *)file->private_data)->private;
2213 s->family = afinfo->family; 2226 s->family = afinfo->family;
2214 s->seq_ops.start = tcp_seq_start; 2227 return 0;
2215 s->seq_ops.next = tcp_seq_next;
2216 s->seq_ops.show = afinfo->seq_show;
2217 s->seq_ops.stop = tcp_seq_stop;
2218
2219 rc = seq_open(file, &s->seq_ops);
2220 if (rc)
2221 goto out_kfree;
2222 seq = file->private_data;
2223 seq->private = s;
2224out:
2225 return rc;
2226out_kfree:
2227 kfree(s);
2228 goto out;
2229} 2228}
2230 2229
2231int tcp_proc_register(struct tcp_seq_afinfo *afinfo) 2230int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2232{ 2231{
2233 int rc = 0; 2232 int rc = 0;
2234 struct proc_dir_entry *p; 2233 struct proc_dir_entry *p;
2235 2234
2236 if (!afinfo) 2235 afinfo->seq_fops.open = tcp_seq_open;
2237 return -EINVAL; 2236 afinfo->seq_fops.read = seq_read;
2238 afinfo->seq_fops->owner = afinfo->owner; 2237 afinfo->seq_fops.llseek = seq_lseek;
2239 afinfo->seq_fops->open = tcp_seq_open; 2238 afinfo->seq_fops.release = seq_release_net;
2240 afinfo->seq_fops->read = seq_read; 2239
2241 afinfo->seq_fops->llseek = seq_lseek; 2240 afinfo->seq_ops.start = tcp_seq_start;
2242 afinfo->seq_fops->release = seq_release_private; 2241 afinfo->seq_ops.next = tcp_seq_next;
2242 afinfo->seq_ops.stop = tcp_seq_stop;
2243 2243
2244 p = proc_net_fops_create(&init_net, afinfo->name, S_IRUGO, afinfo->seq_fops); 2244 p = proc_net_fops_create(net, afinfo->name, S_IRUGO, &afinfo->seq_fops);
2245 if (p) 2245 if (p)
2246 p->data = afinfo; 2246 p->data = afinfo;
2247 else 2247 else
@@ -2249,12 +2249,9 @@ int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
2249 return rc; 2249 return rc;
2250} 2250}
2251 2251
2252void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo) 2252void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2253{ 2253{
2254 if (!afinfo) 2254 proc_net_remove(net, afinfo->name);
2255 return;
2256 proc_net_remove(&init_net, afinfo->name);
2257 memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
2258} 2255}
2259 2256
2260static void get_openreq4(struct sock *sk, struct request_sock *req, 2257static void get_openreq4(struct sock *sk, struct request_sock *req,
@@ -2383,28 +2380,43 @@ out:
2383 return 0; 2380 return 0;
2384} 2381}
2385 2382
2386static struct file_operations tcp4_seq_fops;
2387static struct tcp_seq_afinfo tcp4_seq_afinfo = { 2383static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2388 .owner = THIS_MODULE,
2389 .name = "tcp", 2384 .name = "tcp",
2390 .family = AF_INET, 2385 .family = AF_INET,
2391 .seq_show = tcp4_seq_show, 2386 .seq_fops = {
2392 .seq_fops = &tcp4_seq_fops, 2387 .owner = THIS_MODULE,
2388 },
2389 .seq_ops = {
2390 .show = tcp4_seq_show,
2391 },
2392};
2393
2394static int tcp4_proc_init_net(struct net *net)
2395{
2396 return tcp_proc_register(net, &tcp4_seq_afinfo);
2397}
2398
2399static void tcp4_proc_exit_net(struct net *net)
2400{
2401 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2402}
2403
2404static struct pernet_operations tcp4_net_ops = {
2405 .init = tcp4_proc_init_net,
2406 .exit = tcp4_proc_exit_net,
2393}; 2407};
2394 2408
2395int __init tcp4_proc_init(void) 2409int __init tcp4_proc_init(void)
2396{ 2410{
2397 return tcp_proc_register(&tcp4_seq_afinfo); 2411 return register_pernet_subsys(&tcp4_net_ops);
2398} 2412}
2399 2413
2400void tcp4_proc_exit(void) 2414void tcp4_proc_exit(void)
2401{ 2415{
2402 tcp_proc_unregister(&tcp4_seq_afinfo); 2416 unregister_pernet_subsys(&tcp4_net_ops);
2403} 2417}
2404#endif /* CONFIG_PROC_FS */ 2418#endif /* CONFIG_PROC_FS */
2405 2419
2406DEFINE_PROTO_INUSE(tcp)
2407
2408struct proto tcp_prot = { 2420struct proto tcp_prot = {
2409 .name = "TCP", 2421 .name = "TCP",
2410 .owner = THIS_MODULE, 2422 .owner = THIS_MODULE,
@@ -2435,18 +2447,33 @@ struct proto tcp_prot = {
2435 .obj_size = sizeof(struct tcp_sock), 2447 .obj_size = sizeof(struct tcp_sock),
2436 .twsk_prot = &tcp_timewait_sock_ops, 2448 .twsk_prot = &tcp_timewait_sock_ops,
2437 .rsk_prot = &tcp_request_sock_ops, 2449 .rsk_prot = &tcp_request_sock_ops,
2438 .hashinfo = &tcp_hashinfo, 2450 .h.hashinfo = &tcp_hashinfo,
2439#ifdef CONFIG_COMPAT 2451#ifdef CONFIG_COMPAT
2440 .compat_setsockopt = compat_tcp_setsockopt, 2452 .compat_setsockopt = compat_tcp_setsockopt,
2441 .compat_getsockopt = compat_tcp_getsockopt, 2453 .compat_getsockopt = compat_tcp_getsockopt,
2442#endif 2454#endif
2443 REF_PROTO_INUSE(tcp)
2444}; 2455};
2445 2456
2446void __init tcp_v4_init(struct net_proto_family *ops) 2457
2458static int __net_init tcp_sk_init(struct net *net)
2459{
2460 return inet_ctl_sock_create(&net->ipv4.tcp_sock,
2461 PF_INET, SOCK_RAW, IPPROTO_TCP, net);
2462}
2463
2464static void __net_exit tcp_sk_exit(struct net *net)
2465{
2466 inet_ctl_sock_destroy(net->ipv4.tcp_sock);
2467}
2468
2469static struct pernet_operations __net_initdata tcp_sk_ops = {
2470 .init = tcp_sk_init,
2471 .exit = tcp_sk_exit,
2472};
2473
2474void __init tcp_v4_init(void)
2447{ 2475{
2448 if (inet_csk_ctl_sock_create(&tcp_socket, PF_INET, SOCK_RAW, 2476 if (register_pernet_device(&tcp_sk_ops))
2449 IPPROTO_TCP) < 0)
2450 panic("Failed to create the TCP control socket.\n"); 2477 panic("Failed to create the TCP control socket.\n");
2451} 2478}
2452 2479
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index b61b76847ad9..019c8c16e5cc 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -35,6 +35,8 @@
35#endif 35#endif
36 36
37int sysctl_tcp_syncookies __read_mostly = SYNC_INIT; 37int sysctl_tcp_syncookies __read_mostly = SYNC_INIT;
38EXPORT_SYMBOL(sysctl_tcp_syncookies);
39
38int sysctl_tcp_abort_on_overflow __read_mostly; 40int sysctl_tcp_abort_on_overflow __read_mostly;
39 41
40struct inet_timewait_death_row tcp_death_row = { 42struct inet_timewait_death_row tcp_death_row = {
@@ -536,7 +538,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
536 * Enforce "SYN-ACK" according to figure 8, figure 6 538 * Enforce "SYN-ACK" according to figure 8, figure 6
537 * of RFC793, fixed by RFC1122. 539 * of RFC793, fixed by RFC1122.
538 */ 540 */
539 req->rsk_ops->rtx_syn_ack(sk, req, NULL); 541 req->rsk_ops->rtx_syn_ack(sk, req);
540 return NULL; 542 return NULL;
541 } 543 }
542 544
@@ -569,10 +571,8 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
569 does sequence test, SYN is truncated, and thus we consider 571 does sequence test, SYN is truncated, and thus we consider
570 it a bare ACK. 572 it a bare ACK.
571 573
572 If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this 574 Both ends (listening sockets) accept the new incoming
573 bare ACK. Otherwise, we create an established connection. Both 575 connection and try to talk to each other. 8-)
574 ends (listening sockets) accept the new incoming connection and try
575 to talk to each other. 8-)
576 576
577 Note: This case is both harmless, and rare. Possibility is about the 577 Note: This case is both harmless, and rare. Possibility is about the
578 same as us discovering intelligent life on another plant tomorrow. 578 same as us discovering intelligent life on another plant tomorrow.
@@ -640,13 +640,6 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
640 if (!(flg & TCP_FLAG_ACK)) 640 if (!(flg & TCP_FLAG_ACK))
641 return NULL; 641 return NULL;
642 642
643 /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
644 if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
645 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
646 inet_rsk(req)->acked = 1;
647 return NULL;
648 }
649
650 /* OK, ACK is valid, create big socket and 643 /* OK, ACK is valid, create big socket and
651 * feed this segment to it. It will repeat all 644 * feed this segment to it. It will repeat all
652 * the tests. THIS SEGMENT MUST MOVE SOCKET TO 645 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
@@ -685,7 +678,24 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
685 inet_csk_reqsk_queue_unlink(sk, req, prev); 678 inet_csk_reqsk_queue_unlink(sk, req, prev);
686 inet_csk_reqsk_queue_removed(sk, req); 679 inet_csk_reqsk_queue_removed(sk, req);
687 680
688 inet_csk_reqsk_queue_add(sk, req, child); 681 if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
682 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
683
684 /* the accept queue handling is done is est recv slow
685 * path so lets make sure to start there
686 */
687 tcp_sk(child)->pred_flags = 0;
688 sock_hold(sk);
689 sock_hold(child);
690 tcp_sk(child)->defer_tcp_accept.listen_sk = sk;
691 tcp_sk(child)->defer_tcp_accept.request = req;
692
693 inet_csk_reset_keepalive_timer(child,
694 inet_csk(sk)->icsk_accept_queue.rskq_defer_accept * HZ);
695 } else {
696 inet_csk_reqsk_queue_add(sk, req, child);
697 }
698
689 return child; 699 return child;
690 700
691 listen_overflow: 701 listen_overflow:
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d29ef79c00ca..debf23581606 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -998,7 +998,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
998 xmit_size_goal = mss_now; 998 xmit_size_goal = mss_now;
999 999
1000 if (doing_tso) { 1000 if (doing_tso) {
1001 xmit_size_goal = (65535 - 1001 xmit_size_goal = ((sk->sk_gso_max_size - 1) -
1002 inet_csk(sk)->icsk_af_ops->net_header_len - 1002 inet_csk(sk)->icsk_af_ops->net_header_len -
1003 inet_csk(sk)->icsk_ext_hdr_len - 1003 inet_csk(sk)->icsk_ext_hdr_len -
1004 tp->tcp_header_len); 1004 tp->tcp_header_len);
@@ -1057,7 +1057,7 @@ static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
1057 1057
1058 needed = min(skb->len, window); 1058 needed = min(skb->len, window);
1059 1059
1060 if (skb == tcp_write_queue_tail(sk) && cwnd_len <= needed) 1060 if (cwnd_len <= needed)
1061 return cwnd_len; 1061 return cwnd_len;
1062 1062
1063 return needed - needed % mss_now; 1063 return needed - needed % mss_now;
@@ -1282,7 +1282,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1282 limit = min(send_win, cong_win); 1282 limit = min(send_win, cong_win);
1283 1283
1284 /* If a full-sized TSO skb can be sent, do it. */ 1284 /* If a full-sized TSO skb can be sent, do it. */
1285 if (limit >= 65536) 1285 if (limit >= sk->sk_gso_max_size)
1286 goto send_now; 1286 goto send_now;
1287 1287
1288 if (sysctl_tcp_tso_win_divisor) { 1288 if (sysctl_tcp_tso_win_divisor) {
@@ -2236,7 +2236,11 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2236 2236
2237 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ 2237 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2238 th->window = htons(min(req->rcv_wnd, 65535U)); 2238 th->window = htons(min(req->rcv_wnd, 65535U));
2239 2239#ifdef CONFIG_SYN_COOKIES
2240 if (unlikely(req->cookie_ts))
2241 TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
2242 else
2243#endif
2240 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2244 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2241 tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok, 2245 tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
2242 ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale, 2246 ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
@@ -2571,6 +2575,7 @@ void tcp_send_probe0(struct sock *sk)
2571 } 2575 }
2572} 2576}
2573 2577
2578EXPORT_SYMBOL(tcp_select_initial_window);
2574EXPORT_SYMBOL(tcp_connect); 2579EXPORT_SYMBOL(tcp_connect);
2575EXPORT_SYMBOL(tcp_make_synack); 2580EXPORT_SYMBOL(tcp_make_synack);
2576EXPORT_SYMBOL(tcp_simple_retransmit); 2581EXPORT_SYMBOL(tcp_simple_retransmit);
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 87dd5bff315f..1c509592574a 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -153,7 +153,7 @@ static int tcpprobe_sprint(char *tbuf, int n)
153 = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start)); 153 = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start));
154 154
155 return snprintf(tbuf, n, 155 return snprintf(tbuf, n,
156 "%lu.%09lu %d.%d.%d.%d:%u %d.%d.%d.%d:%u" 156 "%lu.%09lu " NIPQUAD_FMT ":%u " NIPQUAD_FMT ":%u"
157 " %d %#x %#x %u %u %u %u\n", 157 " %d %#x %#x %u %u %u %u\n",
158 (unsigned long) tv.tv_sec, 158 (unsigned long) tv.tv_sec,
159 (unsigned long) tv.tv_nsec, 159 (unsigned long) tv.tv_nsec,
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 803d758a2b12..4de68cf5f2aa 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -299,12 +299,20 @@ static void tcp_retransmit_timer(struct sock *sk)
299 * we cannot allow such beasts to hang infinitely. 299 * we cannot allow such beasts to hang infinitely.
300 */ 300 */
301#ifdef TCP_DEBUG 301#ifdef TCP_DEBUG
302 if (1) { 302 struct inet_sock *inet = inet_sk(sk);
303 struct inet_sock *inet = inet_sk(sk); 303 if (sk->sk_family == AF_INET) {
304 LIMIT_NETDEBUG(KERN_DEBUG "TCP: Treason uncloaked! Peer %u.%u.%u.%u:%u/%u shrinks window %u:%u. Repaired.\n", 304 LIMIT_NETDEBUG(KERN_DEBUG "TCP: Treason uncloaked! Peer " NIPQUAD_FMT ":%u/%u shrinks window %u:%u. Repaired.\n",
305 NIPQUAD(inet->daddr), ntohs(inet->dport), 305 NIPQUAD(inet->daddr), ntohs(inet->dport),
306 inet->num, tp->snd_una, tp->snd_nxt); 306 inet->num, tp->snd_una, tp->snd_nxt);
307 } 307 }
308#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
309 else if (sk->sk_family == AF_INET6) {
310 struct ipv6_pinfo *np = inet6_sk(sk);
311 LIMIT_NETDEBUG(KERN_DEBUG "TCP: Treason uncloaked! Peer " NIP6_FMT ":%u/%u shrinks window %u:%u. Repaired.\n",
312 NIP6(np->daddr), ntohs(inet->dport),
313 inet->num, tp->snd_una, tp->snd_nxt);
314 }
315#endif
308#endif 316#endif
309 if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) { 317 if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
310 tcp_write_err(sk); 318 tcp_write_err(sk);
@@ -481,6 +489,11 @@ static void tcp_keepalive_timer (unsigned long data)
481 goto death; 489 goto death;
482 } 490 }
483 491
492 if (tp->defer_tcp_accept.request && sk->sk_state == TCP_ESTABLISHED) {
493 tcp_send_active_reset(sk, GFP_ATOMIC);
494 goto death;
495 }
496
484 if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE) 497 if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
485 goto out; 498 goto out;
486 499
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index 978b3fd61e65..d3b709a6f264 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -136,6 +136,7 @@ static struct net_protocol tunnel4_protocol = {
136 .handler = tunnel4_rcv, 136 .handler = tunnel4_rcv,
137 .err_handler = tunnel4_err, 137 .err_handler = tunnel4_err,
138 .no_policy = 1, 138 .no_policy = 1,
139 .netns_ok = 1,
139}; 140};
140 141
141#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 142#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -143,6 +144,7 @@ static struct net_protocol tunnel64_protocol = {
143 .handler = tunnel64_rcv, 144 .handler = tunnel64_rcv,
144 .err_handler = tunnel64_err, 145 .err_handler = tunnel64_err,
145 .no_policy = 1, 146 .no_policy = 1,
147 .netns_ok = 1,
146}; 148};
147#endif 149#endif
148 150
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1704c1474ea1..b053ac795275 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -137,29 +137,28 @@ static inline int __udp_lib_lport_inuse(struct net *net, __u16 num,
137 struct hlist_node *node; 137 struct hlist_node *node;
138 138
139 sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)]) 139 sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)])
140 if (sk->sk_net == net && sk->sk_hash == num) 140 if (net_eq(sock_net(sk), net) && sk->sk_hash == num)
141 return 1; 141 return 1;
142 return 0; 142 return 0;
143} 143}
144 144
145/** 145/**
146 * __udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6 146 * udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6
147 * 147 *
148 * @sk: socket struct in question 148 * @sk: socket struct in question
149 * @snum: port number to look up 149 * @snum: port number to look up
150 * @udptable: hash list table, must be of UDP_HTABLE_SIZE
151 * @saddr_comp: AF-dependent comparison of bound local IP addresses 150 * @saddr_comp: AF-dependent comparison of bound local IP addresses
152 */ 151 */
153int __udp_lib_get_port(struct sock *sk, unsigned short snum, 152int udp_lib_get_port(struct sock *sk, unsigned short snum,
154 struct hlist_head udptable[],
155 int (*saddr_comp)(const struct sock *sk1, 153 int (*saddr_comp)(const struct sock *sk1,
156 const struct sock *sk2 ) ) 154 const struct sock *sk2 ) )
157{ 155{
156 struct hlist_head *udptable = sk->sk_prot->h.udp_hash;
158 struct hlist_node *node; 157 struct hlist_node *node;
159 struct hlist_head *head; 158 struct hlist_head *head;
160 struct sock *sk2; 159 struct sock *sk2;
161 int error = 1; 160 int error = 1;
162 struct net *net = sk->sk_net; 161 struct net *net = sock_net(sk);
163 162
164 write_lock_bh(&udp_hash_lock); 163 write_lock_bh(&udp_hash_lock);
165 164
@@ -219,7 +218,7 @@ gotit:
219 sk_for_each(sk2, node, head) 218 sk_for_each(sk2, node, head)
220 if (sk2->sk_hash == snum && 219 if (sk2->sk_hash == snum &&
221 sk2 != sk && 220 sk2 != sk &&
222 sk2->sk_net == net && 221 net_eq(sock_net(sk2), net) &&
223 (!sk2->sk_reuse || !sk->sk_reuse) && 222 (!sk2->sk_reuse || !sk->sk_reuse) &&
224 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if 223 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
225 || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && 224 || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
@@ -232,7 +231,7 @@ gotit:
232 if (sk_unhashed(sk)) { 231 if (sk_unhashed(sk)) {
233 head = &udptable[snum & (UDP_HTABLE_SIZE - 1)]; 232 head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];
234 sk_add_node(sk, head); 233 sk_add_node(sk, head);
235 sock_prot_inuse_add(sk->sk_prot, 1); 234 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
236 } 235 }
237 error = 0; 236 error = 0;
238fail: 237fail:
@@ -240,13 +239,7 @@ fail:
240 return error; 239 return error;
241} 240}
242 241
243int udp_get_port(struct sock *sk, unsigned short snum, 242static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
244 int (*scmp)(const struct sock *, const struct sock *))
245{
246 return __udp_lib_get_port(sk, snum, udp_hash, scmp);
247}
248
249int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
250{ 243{
251 struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); 244 struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
252 245
@@ -255,9 +248,9 @@ int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
255 inet1->rcv_saddr == inet2->rcv_saddr )); 248 inet1->rcv_saddr == inet2->rcv_saddr ));
256} 249}
257 250
258static inline int udp_v4_get_port(struct sock *sk, unsigned short snum) 251int udp_v4_get_port(struct sock *sk, unsigned short snum)
259{ 252{
260 return udp_get_port(sk, snum, ipv4_rcv_saddr_equal); 253 return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal);
261} 254}
262 255
263/* UDP is nearly always wildcards out the wazoo, it makes no sense to try 256/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
@@ -276,7 +269,7 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
276 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) { 269 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
277 struct inet_sock *inet = inet_sk(sk); 270 struct inet_sock *inet = inet_sk(sk);
278 271
279 if (sk->sk_net == net && sk->sk_hash == hnum && 272 if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
280 !ipv6_only_sock(sk)) { 273 !ipv6_only_sock(sk)) {
281 int score = (sk->sk_family == PF_INET ? 1 : 0); 274 int score = (sk->sk_family == PF_INET ? 1 : 0);
282 if (inet->rcv_saddr) { 275 if (inet->rcv_saddr) {
@@ -364,7 +357,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
364 int harderr; 357 int harderr;
365 int err; 358 int err;
366 359
367 sk = __udp4_lib_lookup(skb->dev->nd_net, iph->daddr, uh->dest, 360 sk = __udp4_lib_lookup(dev_net(skb->dev), iph->daddr, uh->dest,
368 iph->saddr, uh->source, skb->dev->ifindex, udptable); 361 iph->saddr, uh->source, skb->dev->ifindex, udptable);
369 if (sk == NULL) { 362 if (sk == NULL) {
370 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 363 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
@@ -614,7 +607,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
614 607
615 ipc.oif = sk->sk_bound_dev_if; 608 ipc.oif = sk->sk_bound_dev_if;
616 if (msg->msg_controllen) { 609 if (msg->msg_controllen) {
617 err = ip_cmsg_send(msg, &ipc); 610 err = ip_cmsg_send(sock_net(sk), msg, &ipc);
618 if (err) 611 if (err)
619 return err; 612 return err;
620 if (ipc.opt) 613 if (ipc.opt)
@@ -663,7 +656,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
663 { .sport = inet->sport, 656 { .sport = inet->sport,
664 .dport = dport } } }; 657 .dport = dport } } };
665 security_sk_classify_flow(sk, &fl); 658 security_sk_classify_flow(sk, &fl);
666 err = ip_route_output_flow(&init_net, &rt, &fl, sk, 1); 659 err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 1);
667 if (err) { 660 if (err) {
668 if (err == -ENETUNREACH) 661 if (err == -ENETUNREACH)
669 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); 662 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
@@ -1188,7 +1181,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
1188 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) 1181 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
1189 return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable); 1182 return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable);
1190 1183
1191 sk = __udp4_lib_lookup(skb->dev->nd_net, saddr, uh->source, daddr, 1184 sk = __udp4_lib_lookup(dev_net(skb->dev), saddr, uh->source, daddr,
1192 uh->dest, inet_iif(skb), udptable); 1185 uh->dest, inet_iif(skb), udptable);
1193 1186
1194 if (sk != NULL) { 1187 if (sk != NULL) {
@@ -1228,7 +1221,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
1228 return 0; 1221 return 0;
1229 1222
1230short_packet: 1223short_packet:
1231 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n", 1224 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From " NIPQUAD_FMT ":%u %d/%d to " NIPQUAD_FMT ":%u\n",
1232 proto == IPPROTO_UDPLITE ? "-Lite" : "", 1225 proto == IPPROTO_UDPLITE ? "-Lite" : "",
1233 NIPQUAD(saddr), 1226 NIPQUAD(saddr),
1234 ntohs(uh->source), 1227 ntohs(uh->source),
@@ -1243,7 +1236,7 @@ csum_error:
1243 * RFC1122: OK. Discards the bad packet silently (as far as 1236 * RFC1122: OK. Discards the bad packet silently (as far as
1244 * the network is concerned, anyway) as per 4.1.3.4 (MUST). 1237 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
1245 */ 1238 */
1246 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n", 1239 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From " NIPQUAD_FMT ":%u to " NIPQUAD_FMT ":%u ulen %d\n",
1247 proto == IPPROTO_UDPLITE ? "-Lite" : "", 1240 proto == IPPROTO_UDPLITE ? "-Lite" : "",
1248 NIPQUAD(saddr), 1241 NIPQUAD(saddr),
1249 ntohs(uh->source), 1242 ntohs(uh->source),
@@ -1474,8 +1467,6 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
1474 1467
1475} 1468}
1476 1469
1477DEFINE_PROTO_INUSE(udp)
1478
1479struct proto udp_prot = { 1470struct proto udp_prot = {
1480 .name = "UDP", 1471 .name = "UDP",
1481 .owner = THIS_MODULE, 1472 .owner = THIS_MODULE,
@@ -1498,11 +1489,11 @@ struct proto udp_prot = {
1498 .sysctl_wmem = &sysctl_udp_wmem_min, 1489 .sysctl_wmem = &sysctl_udp_wmem_min,
1499 .sysctl_rmem = &sysctl_udp_rmem_min, 1490 .sysctl_rmem = &sysctl_udp_rmem_min,
1500 .obj_size = sizeof(struct udp_sock), 1491 .obj_size = sizeof(struct udp_sock),
1492 .h.udp_hash = udp_hash,
1501#ifdef CONFIG_COMPAT 1493#ifdef CONFIG_COMPAT
1502 .compat_setsockopt = compat_udp_setsockopt, 1494 .compat_setsockopt = compat_udp_setsockopt,
1503 .compat_getsockopt = compat_udp_getsockopt, 1495 .compat_getsockopt = compat_udp_getsockopt,
1504#endif 1496#endif
1505 REF_PROTO_INUSE(udp)
1506}; 1497};
1507 1498
1508/* ------------------------------------------------------------------------ */ 1499/* ------------------------------------------------------------------------ */
@@ -1512,10 +1503,13 @@ static struct sock *udp_get_first(struct seq_file *seq)
1512{ 1503{
1513 struct sock *sk; 1504 struct sock *sk;
1514 struct udp_iter_state *state = seq->private; 1505 struct udp_iter_state *state = seq->private;
1506 struct net *net = seq_file_net(seq);
1515 1507
1516 for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) { 1508 for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
1517 struct hlist_node *node; 1509 struct hlist_node *node;
1518 sk_for_each(sk, node, state->hashtable + state->bucket) { 1510 sk_for_each(sk, node, state->hashtable + state->bucket) {
1511 if (!net_eq(sock_net(sk), net))
1512 continue;
1519 if (sk->sk_family == state->family) 1513 if (sk->sk_family == state->family)
1520 goto found; 1514 goto found;
1521 } 1515 }
@@ -1528,12 +1522,13 @@ found:
1528static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) 1522static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
1529{ 1523{
1530 struct udp_iter_state *state = seq->private; 1524 struct udp_iter_state *state = seq->private;
1525 struct net *net = seq_file_net(seq);
1531 1526
1532 do { 1527 do {
1533 sk = sk_next(sk); 1528 sk = sk_next(sk);
1534try_again: 1529try_again:
1535 ; 1530 ;
1536 } while (sk && sk->sk_family != state->family); 1531 } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
1537 1532
1538 if (!sk && ++state->bucket < UDP_HTABLE_SIZE) { 1533 if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
1539 sk = sk_head(state->hashtable + state->bucket); 1534 sk = sk_head(state->hashtable + state->bucket);
@@ -1581,47 +1576,36 @@ static void udp_seq_stop(struct seq_file *seq, void *v)
1581static int udp_seq_open(struct inode *inode, struct file *file) 1576static int udp_seq_open(struct inode *inode, struct file *file)
1582{ 1577{
1583 struct udp_seq_afinfo *afinfo = PDE(inode)->data; 1578 struct udp_seq_afinfo *afinfo = PDE(inode)->data;
1584 struct seq_file *seq; 1579 struct udp_iter_state *s;
1585 int rc = -ENOMEM; 1580 int err;
1586 struct udp_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
1587 1581
1588 if (!s) 1582 err = seq_open_net(inode, file, &afinfo->seq_ops,
1589 goto out; 1583 sizeof(struct udp_iter_state));
1584 if (err < 0)
1585 return err;
1586
1587 s = ((struct seq_file *)file->private_data)->private;
1590 s->family = afinfo->family; 1588 s->family = afinfo->family;
1591 s->hashtable = afinfo->hashtable; 1589 s->hashtable = afinfo->hashtable;
1592 s->seq_ops.start = udp_seq_start; 1590 return err;
1593 s->seq_ops.next = udp_seq_next;
1594 s->seq_ops.show = afinfo->seq_show;
1595 s->seq_ops.stop = udp_seq_stop;
1596
1597 rc = seq_open(file, &s->seq_ops);
1598 if (rc)
1599 goto out_kfree;
1600
1601 seq = file->private_data;
1602 seq->private = s;
1603out:
1604 return rc;
1605out_kfree:
1606 kfree(s);
1607 goto out;
1608} 1591}
1609 1592
1610/* ------------------------------------------------------------------------ */ 1593/* ------------------------------------------------------------------------ */
1611int udp_proc_register(struct udp_seq_afinfo *afinfo) 1594int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
1612{ 1595{
1613 struct proc_dir_entry *p; 1596 struct proc_dir_entry *p;
1614 int rc = 0; 1597 int rc = 0;
1615 1598
1616 if (!afinfo) 1599 afinfo->seq_fops.open = udp_seq_open;
1617 return -EINVAL; 1600 afinfo->seq_fops.read = seq_read;
1618 afinfo->seq_fops->owner = afinfo->owner; 1601 afinfo->seq_fops.llseek = seq_lseek;
1619 afinfo->seq_fops->open = udp_seq_open; 1602 afinfo->seq_fops.release = seq_release_net;
1620 afinfo->seq_fops->read = seq_read; 1603
1621 afinfo->seq_fops->llseek = seq_lseek; 1604 afinfo->seq_ops.start = udp_seq_start;
1622 afinfo->seq_fops->release = seq_release_private; 1605 afinfo->seq_ops.next = udp_seq_next;
1606 afinfo->seq_ops.stop = udp_seq_stop;
1623 1607
1624 p = proc_net_fops_create(&init_net, afinfo->name, S_IRUGO, afinfo->seq_fops); 1608 p = proc_net_fops_create(net, afinfo->name, S_IRUGO, &afinfo->seq_fops);
1625 if (p) 1609 if (p)
1626 p->data = afinfo; 1610 p->data = afinfo;
1627 else 1611 else
@@ -1629,12 +1613,9 @@ int udp_proc_register(struct udp_seq_afinfo *afinfo)
1629 return rc; 1613 return rc;
1630} 1614}
1631 1615
1632void udp_proc_unregister(struct udp_seq_afinfo *afinfo) 1616void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
1633{ 1617{
1634 if (!afinfo) 1618 proc_net_remove(net, afinfo->name);
1635 return;
1636 proc_net_remove(&init_net, afinfo->name);
1637 memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
1638} 1619}
1639 1620
1640/* ------------------------------------------------------------------------ */ 1621/* ------------------------------------------------------------------------ */
@@ -1673,24 +1654,41 @@ int udp4_seq_show(struct seq_file *seq, void *v)
1673} 1654}
1674 1655
1675/* ------------------------------------------------------------------------ */ 1656/* ------------------------------------------------------------------------ */
1676static struct file_operations udp4_seq_fops;
1677static struct udp_seq_afinfo udp4_seq_afinfo = { 1657static struct udp_seq_afinfo udp4_seq_afinfo = {
1678 .owner = THIS_MODULE,
1679 .name = "udp", 1658 .name = "udp",
1680 .family = AF_INET, 1659 .family = AF_INET,
1681 .hashtable = udp_hash, 1660 .hashtable = udp_hash,
1682 .seq_show = udp4_seq_show, 1661 .seq_fops = {
1683 .seq_fops = &udp4_seq_fops, 1662 .owner = THIS_MODULE,
1663 },
1664 .seq_ops = {
1665 .show = udp4_seq_show,
1666 },
1667};
1668
1669static int udp4_proc_init_net(struct net *net)
1670{
1671 return udp_proc_register(net, &udp4_seq_afinfo);
1672}
1673
1674static void udp4_proc_exit_net(struct net *net)
1675{
1676 udp_proc_unregister(net, &udp4_seq_afinfo);
1677}
1678
1679static struct pernet_operations udp4_net_ops = {
1680 .init = udp4_proc_init_net,
1681 .exit = udp4_proc_exit_net,
1684}; 1682};
1685 1683
1686int __init udp4_proc_init(void) 1684int __init udp4_proc_init(void)
1687{ 1685{
1688 return udp_proc_register(&udp4_seq_afinfo); 1686 return register_pernet_subsys(&udp4_net_ops);
1689} 1687}
1690 1688
1691void udp4_proc_exit(void) 1689void udp4_proc_exit(void)
1692{ 1690{
1693 udp_proc_unregister(&udp4_seq_afinfo); 1691 unregister_pernet_subsys(&udp4_net_ops);
1694} 1692}
1695#endif /* CONFIG_PROC_FS */ 1693#endif /* CONFIG_PROC_FS */
1696 1694
@@ -1717,12 +1715,12 @@ EXPORT_SYMBOL(udp_disconnect);
1717EXPORT_SYMBOL(udp_hash); 1715EXPORT_SYMBOL(udp_hash);
1718EXPORT_SYMBOL(udp_hash_lock); 1716EXPORT_SYMBOL(udp_hash_lock);
1719EXPORT_SYMBOL(udp_ioctl); 1717EXPORT_SYMBOL(udp_ioctl);
1720EXPORT_SYMBOL(udp_get_port);
1721EXPORT_SYMBOL(udp_prot); 1718EXPORT_SYMBOL(udp_prot);
1722EXPORT_SYMBOL(udp_sendmsg); 1719EXPORT_SYMBOL(udp_sendmsg);
1723EXPORT_SYMBOL(udp_lib_getsockopt); 1720EXPORT_SYMBOL(udp_lib_getsockopt);
1724EXPORT_SYMBOL(udp_lib_setsockopt); 1721EXPORT_SYMBOL(udp_lib_setsockopt);
1725EXPORT_SYMBOL(udp_poll); 1722EXPORT_SYMBOL(udp_poll);
1723EXPORT_SYMBOL(udp_lib_get_port);
1726 1724
1727#ifdef CONFIG_PROC_FS 1725#ifdef CONFIG_PROC_FS
1728EXPORT_SYMBOL(udp_proc_register); 1726EXPORT_SYMBOL(udp_proc_register);
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index 6c55828e41ba..7288bf7977fb 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -8,11 +8,7 @@
8extern int __udp4_lib_rcv(struct sk_buff *, struct hlist_head [], int ); 8extern int __udp4_lib_rcv(struct sk_buff *, struct hlist_head [], int );
9extern void __udp4_lib_err(struct sk_buff *, u32, struct hlist_head []); 9extern void __udp4_lib_err(struct sk_buff *, u32, struct hlist_head []);
10 10
11extern int __udp_lib_get_port(struct sock *sk, unsigned short snum, 11extern int udp_v4_get_port(struct sock *sk, unsigned short snum);
12 struct hlist_head udptable[],
13 int (*)(const struct sock*,const struct sock*));
14extern int ipv4_rcv_saddr_equal(const struct sock *, const struct sock *);
15
16 12
17extern int udp_setsockopt(struct sock *sk, int level, int optname, 13extern int udp_setsockopt(struct sock *sk, int level, int optname,
18 char __user *optval, int optlen); 14 char __user *optval, int optlen);
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 001b881ca36f..72ce26b6c4d3 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -17,17 +17,6 @@ DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics) __read_mostly;
17 17
18struct hlist_head udplite_hash[UDP_HTABLE_SIZE]; 18struct hlist_head udplite_hash[UDP_HTABLE_SIZE];
19 19
20int udplite_get_port(struct sock *sk, unsigned short p,
21 int (*c)(const struct sock *, const struct sock *))
22{
23 return __udp_lib_get_port(sk, p, udplite_hash, c);
24}
25
26static int udplite_v4_get_port(struct sock *sk, unsigned short snum)
27{
28 return udplite_get_port(sk, snum, ipv4_rcv_saddr_equal);
29}
30
31static int udplite_rcv(struct sk_buff *skb) 20static int udplite_rcv(struct sk_buff *skb)
32{ 21{
33 return __udp4_lib_rcv(skb, udplite_hash, IPPROTO_UDPLITE); 22 return __udp4_lib_rcv(skb, udplite_hash, IPPROTO_UDPLITE);
@@ -42,10 +31,9 @@ static struct net_protocol udplite_protocol = {
42 .handler = udplite_rcv, 31 .handler = udplite_rcv,
43 .err_handler = udplite_err, 32 .err_handler = udplite_err,
44 .no_policy = 1, 33 .no_policy = 1,
34 .netns_ok = 1,
45}; 35};
46 36
47DEFINE_PROTO_INUSE(udplite)
48
49struct proto udplite_prot = { 37struct proto udplite_prot = {
50 .name = "UDP-Lite", 38 .name = "UDP-Lite",
51 .owner = THIS_MODULE, 39 .owner = THIS_MODULE,
@@ -63,13 +51,13 @@ struct proto udplite_prot = {
63 .backlog_rcv = udp_queue_rcv_skb, 51 .backlog_rcv = udp_queue_rcv_skb,
64 .hash = udp_lib_hash, 52 .hash = udp_lib_hash,
65 .unhash = udp_lib_unhash, 53 .unhash = udp_lib_unhash,
66 .get_port = udplite_v4_get_port, 54 .get_port = udp_v4_get_port,
67 .obj_size = sizeof(struct udp_sock), 55 .obj_size = sizeof(struct udp_sock),
56 .h.udp_hash = udplite_hash,
68#ifdef CONFIG_COMPAT 57#ifdef CONFIG_COMPAT
69 .compat_setsockopt = compat_udp_setsockopt, 58 .compat_setsockopt = compat_udp_setsockopt,
70 .compat_getsockopt = compat_udp_getsockopt, 59 .compat_getsockopt = compat_udp_getsockopt,
71#endif 60#endif
72 REF_PROTO_INUSE(udplite)
73}; 61};
74 62
75static struct inet_protosw udplite4_protosw = { 63static struct inet_protosw udplite4_protosw = {
@@ -83,15 +71,42 @@ static struct inet_protosw udplite4_protosw = {
83}; 71};
84 72
85#ifdef CONFIG_PROC_FS 73#ifdef CONFIG_PROC_FS
86static struct file_operations udplite4_seq_fops;
87static struct udp_seq_afinfo udplite4_seq_afinfo = { 74static struct udp_seq_afinfo udplite4_seq_afinfo = {
88 .owner = THIS_MODULE,
89 .name = "udplite", 75 .name = "udplite",
90 .family = AF_INET, 76 .family = AF_INET,
91 .hashtable = udplite_hash, 77 .hashtable = udplite_hash,
92 .seq_show = udp4_seq_show, 78 .seq_fops = {
93 .seq_fops = &udplite4_seq_fops, 79 .owner = THIS_MODULE,
80 },
81 .seq_ops = {
82 .show = udp4_seq_show,
83 },
84};
85
86static int udplite4_proc_init_net(struct net *net)
87{
88 return udp_proc_register(net, &udplite4_seq_afinfo);
89}
90
91static void udplite4_proc_exit_net(struct net *net)
92{
93 udp_proc_unregister(net, &udplite4_seq_afinfo);
94}
95
96static struct pernet_operations udplite4_net_ops = {
97 .init = udplite4_proc_init_net,
98 .exit = udplite4_proc_exit_net,
94}; 99};
100
101static __init int udplite4_proc_init(void)
102{
103 return register_pernet_subsys(&udplite4_net_ops);
104}
105#else
106static inline int udplite4_proc_init(void)
107{
108 return 0;
109}
95#endif 110#endif
96 111
97void __init udplite4_register(void) 112void __init udplite4_register(void)
@@ -104,18 +119,15 @@ void __init udplite4_register(void)
104 119
105 inet_register_protosw(&udplite4_protosw); 120 inet_register_protosw(&udplite4_protosw);
106 121
107#ifdef CONFIG_PROC_FS 122 if (udplite4_proc_init())
108 if (udp_proc_register(&udplite4_seq_afinfo)) /* udplite4_proc_init() */ 123 printk(KERN_ERR "%s: Cannot register /proc!\n", __func__);
109 printk(KERN_ERR "%s: Cannot register /proc!\n", __FUNCTION__);
110#endif
111 return; 124 return;
112 125
113out_unregister_proto: 126out_unregister_proto:
114 proto_unregister(&udplite_prot); 127 proto_unregister(&udplite_prot);
115out_register_err: 128out_register_err:
116 printk(KERN_CRIT "%s: Cannot add UDP-Lite protocol.\n", __FUNCTION__); 129 printk(KERN_CRIT "%s: Cannot add UDP-Lite protocol.\n", __func__);
117} 130}
118 131
119EXPORT_SYMBOL(udplite_hash); 132EXPORT_SYMBOL(udplite_hash);
120EXPORT_SYMBOL(udplite_prot); 133EXPORT_SYMBOL(udplite_prot);
121EXPORT_SYMBOL(udplite_get_port);
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 10ed70491434..c63de0a72aba 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -221,7 +221,7 @@ static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
221 xdst = (struct xfrm_dst *)dst; 221 xdst = (struct xfrm_dst *)dst;
222 if (xdst->u.rt.idev->dev == dev) { 222 if (xdst->u.rt.idev->dev == dev) {
223 struct in_device *loopback_idev = 223 struct in_device *loopback_idev =
224 in_dev_get(dev->nd_net->loopback_dev); 224 in_dev_get(dev_net(dev)->loopback_dev);
225 BUG_ON(!loopback_idev); 225 BUG_ON(!loopback_idev);
226 226
227 do { 227 do {
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 47263e45bacb..42814a2ec9d7 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5# IPv6 as module will cause a CRASH if you try to unload it 5# IPv6 as module will cause a CRASH if you try to unload it
6config IPV6 6menuconfig IPV6
7 tristate "The IPv6 protocol" 7 tristate "The IPv6 protocol"
8 default m 8 default m
9 ---help--- 9 ---help---
@@ -19,9 +19,10 @@ config IPV6
19 To compile this protocol support as a module, choose M here: the 19 To compile this protocol support as a module, choose M here: the
20 module will be called ipv6. 20 module will be called ipv6.
21 21
22if IPV6
23
22config IPV6_PRIVACY 24config IPV6_PRIVACY
23 bool "IPv6: Privacy Extensions support" 25 bool "IPv6: Privacy Extensions support"
24 depends on IPV6
25 ---help--- 26 ---help---
26 Privacy Extensions for Stateless Address Autoconfiguration in IPv6 27 Privacy Extensions for Stateless Address Autoconfiguration in IPv6
27 support. With this option, additional periodically-alter 28 support. With this option, additional periodically-alter
@@ -40,7 +41,6 @@ config IPV6_PRIVACY
40 41
41config IPV6_ROUTER_PREF 42config IPV6_ROUTER_PREF
42 bool "IPv6: Router Preference (RFC 4191) support" 43 bool "IPv6: Router Preference (RFC 4191) support"
43 depends on IPV6
44 ---help--- 44 ---help---
45 Router Preference is an optional extension to the Router 45 Router Preference is an optional extension to the Router
46 Advertisement message to improve the ability of hosts 46 Advertisement message to improve the ability of hosts
@@ -59,7 +59,7 @@ config IPV6_ROUTE_INFO
59 59
60config IPV6_OPTIMISTIC_DAD 60config IPV6_OPTIMISTIC_DAD
61 bool "IPv6: Enable RFC 4429 Optimistic DAD (EXPERIMENTAL)" 61 bool "IPv6: Enable RFC 4429 Optimistic DAD (EXPERIMENTAL)"
62 depends on IPV6 && EXPERIMENTAL 62 depends on EXPERIMENTAL
63 ---help--- 63 ---help---
64 This is experimental support for optimistic Duplicate 64 This is experimental support for optimistic Duplicate
65 Address Detection. It allows for autoconfigured addresses 65 Address Detection. It allows for autoconfigured addresses
@@ -69,7 +69,6 @@ config IPV6_OPTIMISTIC_DAD
69 69
70config INET6_AH 70config INET6_AH
71 tristate "IPv6: AH transformation" 71 tristate "IPv6: AH transformation"
72 depends on IPV6
73 select XFRM 72 select XFRM
74 select CRYPTO 73 select CRYPTO
75 select CRYPTO_HMAC 74 select CRYPTO_HMAC
@@ -82,7 +81,6 @@ config INET6_AH
82 81
83config INET6_ESP 82config INET6_ESP
84 tristate "IPv6: ESP transformation" 83 tristate "IPv6: ESP transformation"
85 depends on IPV6
86 select XFRM 84 select XFRM
87 select CRYPTO 85 select CRYPTO
88 select CRYPTO_AUTHENC 86 select CRYPTO_AUTHENC
@@ -98,7 +96,6 @@ config INET6_ESP
98 96
99config INET6_IPCOMP 97config INET6_IPCOMP
100 tristate "IPv6: IPComp transformation" 98 tristate "IPv6: IPComp transformation"
101 depends on IPV6
102 select XFRM 99 select XFRM
103 select INET6_XFRM_TUNNEL 100 select INET6_XFRM_TUNNEL
104 select CRYPTO 101 select CRYPTO
@@ -111,7 +108,7 @@ config INET6_IPCOMP
111 108
112config IPV6_MIP6 109config IPV6_MIP6
113 tristate "IPv6: Mobility (EXPERIMENTAL)" 110 tristate "IPv6: Mobility (EXPERIMENTAL)"
114 depends on IPV6 && EXPERIMENTAL 111 depends on EXPERIMENTAL
115 select XFRM 112 select XFRM
116 ---help--- 113 ---help---
117 Support for IPv6 Mobility described in RFC 3775. 114 Support for IPv6 Mobility described in RFC 3775.
@@ -129,7 +126,6 @@ config INET6_TUNNEL
129 126
130config INET6_XFRM_MODE_TRANSPORT 127config INET6_XFRM_MODE_TRANSPORT
131 tristate "IPv6: IPsec transport mode" 128 tristate "IPv6: IPsec transport mode"
132 depends on IPV6
133 default IPV6 129 default IPV6
134 select XFRM 130 select XFRM
135 ---help--- 131 ---help---
@@ -139,7 +135,6 @@ config INET6_XFRM_MODE_TRANSPORT
139 135
140config INET6_XFRM_MODE_TUNNEL 136config INET6_XFRM_MODE_TUNNEL
141 tristate "IPv6: IPsec tunnel mode" 137 tristate "IPv6: IPsec tunnel mode"
142 depends on IPV6
143 default IPV6 138 default IPV6
144 select XFRM 139 select XFRM
145 ---help--- 140 ---help---
@@ -149,7 +144,6 @@ config INET6_XFRM_MODE_TUNNEL
149 144
150config INET6_XFRM_MODE_BEET 145config INET6_XFRM_MODE_BEET
151 tristate "IPv6: IPsec BEET mode" 146 tristate "IPv6: IPsec BEET mode"
152 depends on IPV6
153 default IPV6 147 default IPV6
154 select XFRM 148 select XFRM
155 ---help--- 149 ---help---
@@ -159,15 +153,15 @@ config INET6_XFRM_MODE_BEET
159 153
160config INET6_XFRM_MODE_ROUTEOPTIMIZATION 154config INET6_XFRM_MODE_ROUTEOPTIMIZATION
161 tristate "IPv6: MIPv6 route optimization mode (EXPERIMENTAL)" 155 tristate "IPv6: MIPv6 route optimization mode (EXPERIMENTAL)"
162 depends on IPV6 && EXPERIMENTAL 156 depends on EXPERIMENTAL
163 select XFRM 157 select XFRM
164 ---help--- 158 ---help---
165 Support for MIPv6 route optimization mode. 159 Support for MIPv6 route optimization mode.
166 160
167config IPV6_SIT 161config IPV6_SIT
168 tristate "IPv6: IPv6-in-IPv4 tunnel (SIT driver)" 162 tristate "IPv6: IPv6-in-IPv4 tunnel (SIT driver)"
169 depends on IPV6
170 select INET_TUNNEL 163 select INET_TUNNEL
164 select IPV6_NDISC_NODETYPE
171 default y 165 default y
172 ---help--- 166 ---help---
173 Tunneling means encapsulating data of one protocol type within 167 Tunneling means encapsulating data of one protocol type within
@@ -178,10 +172,12 @@ config IPV6_SIT
178 172
179 Saying M here will produce a module called sit.ko. If unsure, say Y. 173 Saying M here will produce a module called sit.ko. If unsure, say Y.
180 174
175config IPV6_NDISC_NODETYPE
176 bool
177
181config IPV6_TUNNEL 178config IPV6_TUNNEL
182 tristate "IPv6: IP-in-IPv6 tunnel (RFC2473)" 179 tristate "IPv6: IP-in-IPv6 tunnel (RFC2473)"
183 select INET6_TUNNEL 180 select INET6_TUNNEL
184 depends on IPV6
185 ---help--- 181 ---help---
186 Support for IPv6-in-IPv6 and IPv4-in-IPv6 tunnels described in 182 Support for IPv6-in-IPv6 and IPv4-in-IPv6 tunnels described in
187 RFC 2473. 183 RFC 2473.
@@ -190,7 +186,7 @@ config IPV6_TUNNEL
190 186
191config IPV6_MULTIPLE_TABLES 187config IPV6_MULTIPLE_TABLES
192 bool "IPv6: Multiple Routing Tables" 188 bool "IPv6: Multiple Routing Tables"
193 depends on IPV6 && EXPERIMENTAL 189 depends on EXPERIMENTAL
194 select FIB_RULES 190 select FIB_RULES
195 ---help--- 191 ---help---
196 Support multiple routing tables. 192 Support multiple routing tables.
@@ -209,3 +205,18 @@ config IPV6_SUBTREES
209 205
210 If unsure, say N. 206 If unsure, say N.
211 207
208config IPV6_MROUTE
209 bool "IPv6: multicast routing (EXPERIMENTAL)"
210 depends on IPV6 && EXPERIMENTAL
211 ---help---
212 Experimental support for IPv6 multicast forwarding.
213 If unsure, say N.
214
215config IPV6_PIMSM_V2
216 bool "IPv6: PIM-SM version 2 support (EXPERIMENTAL)"
217 depends on IPV6_MROUTE
218 ---help---
219 Support for IPv6 PIM multicast routing protocol PIM-SMv2.
220 If unsure, say N.
221
222endif # IPV6
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 24f3aa0f2a35..686934acfac1 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -11,11 +11,14 @@ ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
11 exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o 11 exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o
12 12
13ipv6-$(CONFIG_SYSCTL) = sysctl_net_ipv6.o 13ipv6-$(CONFIG_SYSCTL) = sysctl_net_ipv6.o
14ipv6-$(CONFIG_IPV6_MROUTE) += ip6mr.o
15
14ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \ 16ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \
15 xfrm6_output.o 17 xfrm6_output.o
16ipv6-$(CONFIG_NETFILTER) += netfilter.o 18ipv6-$(CONFIG_NETFILTER) += netfilter.o
17ipv6-$(CONFIG_IPV6_MULTIPLE_TABLES) += fib6_rules.o 19ipv6-$(CONFIG_IPV6_MULTIPLE_TABLES) += fib6_rules.o
18ipv6-$(CONFIG_PROC_FS) += proc.o 20ipv6-$(CONFIG_PROC_FS) += proc.o
21ipv6-$(CONFIG_SYN_COOKIES) += syncookies.o
19 22
20ipv6-objs += $(ipv6-y) 23ipv6-objs += $(ipv6-y)
21 24
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e08955baedff..8a0fd4007bdb 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -222,6 +222,8 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
222/* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */ 222/* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
223const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT; 223const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
224const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT; 224const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT;
225const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
226const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
225 227
226/* Check if a valid qdisc is available */ 228/* Check if a valid qdisc is available */
227static inline int addrconf_qdisc_ok(struct net_device *dev) 229static inline int addrconf_qdisc_ok(struct net_device *dev)
@@ -321,7 +323,6 @@ EXPORT_SYMBOL(in6_dev_finish_destroy);
321static struct inet6_dev * ipv6_add_dev(struct net_device *dev) 323static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
322{ 324{
323 struct inet6_dev *ndev; 325 struct inet6_dev *ndev;
324 struct in6_addr maddr;
325 326
326 ASSERT_RTNL(); 327 ASSERT_RTNL();
327 328
@@ -335,7 +336,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
335 336
336 rwlock_init(&ndev->lock); 337 rwlock_init(&ndev->lock);
337 ndev->dev = dev; 338 ndev->dev = dev;
338 memcpy(&ndev->cnf, dev->nd_net->ipv6.devconf_dflt, sizeof(ndev->cnf)); 339 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
339 ndev->cnf.mtu6 = dev->mtu; 340 ndev->cnf.mtu6 = dev->mtu;
340 ndev->cnf.sysctl = NULL; 341 ndev->cnf.sysctl = NULL;
341 ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl); 342 ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
@@ -349,7 +350,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
349 if (snmp6_alloc_dev(ndev) < 0) { 350 if (snmp6_alloc_dev(ndev) < 0) {
350 ADBG((KERN_WARNING 351 ADBG((KERN_WARNING
351 "%s(): cannot allocate memory for statistics; dev=%s.\n", 352 "%s(): cannot allocate memory for statistics; dev=%s.\n",
352 __FUNCTION__, dev->name)); 353 __func__, dev->name));
353 neigh_parms_release(&nd_tbl, ndev->nd_parms); 354 neigh_parms_release(&nd_tbl, ndev->nd_parms);
354 ndev->dead = 1; 355 ndev->dead = 1;
355 in6_dev_finish_destroy(ndev); 356 in6_dev_finish_destroy(ndev);
@@ -359,7 +360,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
359 if (snmp6_register_dev(ndev) < 0) { 360 if (snmp6_register_dev(ndev) < 0) {
360 ADBG((KERN_WARNING 361 ADBG((KERN_WARNING
361 "%s(): cannot create /proc/net/dev_snmp6/%s\n", 362 "%s(): cannot create /proc/net/dev_snmp6/%s\n",
362 __FUNCTION__, dev->name)); 363 __func__, dev->name));
363 neigh_parms_release(&nd_tbl, ndev->nd_parms); 364 neigh_parms_release(&nd_tbl, ndev->nd_parms);
364 ndev->dead = 1; 365 ndev->dead = 1;
365 in6_dev_finish_destroy(ndev); 366 in6_dev_finish_destroy(ndev);
@@ -407,8 +408,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
407 rcu_assign_pointer(dev->ip6_ptr, ndev); 408 rcu_assign_pointer(dev->ip6_ptr, ndev);
408 409
409 /* Join all-node multicast group */ 410 /* Join all-node multicast group */
410 ipv6_addr_all_nodes(&maddr); 411 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
411 ipv6_dev_mc_inc(dev, &maddr);
412 412
413 return ndev; 413 return ndev;
414} 414}
@@ -434,18 +434,15 @@ static void dev_forward_change(struct inet6_dev *idev)
434{ 434{
435 struct net_device *dev; 435 struct net_device *dev;
436 struct inet6_ifaddr *ifa; 436 struct inet6_ifaddr *ifa;
437 struct in6_addr addr;
438 437
439 if (!idev) 438 if (!idev)
440 return; 439 return;
441 dev = idev->dev; 440 dev = idev->dev;
442 if (dev && (dev->flags & IFF_MULTICAST)) { 441 if (dev && (dev->flags & IFF_MULTICAST)) {
443 ipv6_addr_all_routers(&addr);
444
445 if (idev->cnf.forwarding) 442 if (idev->cnf.forwarding)
446 ipv6_dev_mc_inc(dev, &addr); 443 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
447 else 444 else
448 ipv6_dev_mc_dec(dev, &addr); 445 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
449 } 446 }
450 for (ifa=idev->addr_list; ifa; ifa=ifa->if_next) { 447 for (ifa=idev->addr_list; ifa; ifa=ifa->if_next) {
451 if (ifa->flags&IFA_F_TENTATIVE) 448 if (ifa->flags&IFA_F_TENTATIVE)
@@ -494,7 +491,7 @@ static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
494 dev_forward_change((struct inet6_dev *)table->extra1); 491 dev_forward_change((struct inet6_dev *)table->extra1);
495 492
496 if (*p) 493 if (*p)
497 rt6_purge_dflt_routers(); 494 rt6_purge_dflt_routers(net);
498} 495}
499#endif 496#endif
500 497
@@ -542,6 +539,25 @@ ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
542 *ifap = ifp; 539 *ifap = ifp;
543} 540}
544 541
542/*
543 * Hash function taken from net_alias.c
544 */
545static u8 ipv6_addr_hash(const struct in6_addr *addr)
546{
547 __u32 word;
548
549 /*
550 * We perform the hash function over the last 64 bits of the address
551 * This will include the IEEE address token on links that support it.
552 */
553
554 word = (__force u32)(addr->s6_addr32[2] ^ addr->s6_addr32[3]);
555 word ^= (word >> 16);
556 word ^= (word >> 8);
557
558 return ((word ^ (word >> 4)) & 0x0f);
559}
560
545/* On success it returns ifp with increased reference count */ 561/* On success it returns ifp with increased reference count */
546 562
547static struct inet6_ifaddr * 563static struct inet6_ifaddr *
@@ -562,7 +578,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
562 write_lock(&addrconf_hash_lock); 578 write_lock(&addrconf_hash_lock);
563 579
564 /* Ignore adding duplicate addresses on an interface */ 580 /* Ignore adding duplicate addresses on an interface */
565 if (ipv6_chk_same_addr(&init_net, addr, idev->dev)) { 581 if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) {
566 ADBG(("ipv6_add_addr: already assigned\n")); 582 ADBG(("ipv6_add_addr: already assigned\n"));
567 err = -EEXIST; 583 err = -EEXIST;
568 goto out; 584 goto out;
@@ -752,9 +768,9 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
752 if ((ifp->flags & IFA_F_PERMANENT) && onlink < 1) { 768 if ((ifp->flags & IFA_F_PERMANENT) && onlink < 1) {
753 struct in6_addr prefix; 769 struct in6_addr prefix;
754 struct rt6_info *rt; 770 struct rt6_info *rt;
755 771 struct net *net = dev_net(ifp->idev->dev);
756 ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len); 772 ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
757 rt = rt6_lookup(&prefix, NULL, ifp->idev->dev->ifindex, 1); 773 rt = rt6_lookup(net, &prefix, NULL, ifp->idev->dev->ifindex, 1);
758 774
759 if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) { 775 if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) {
760 if (onlink == 0) { 776 if (onlink == 0) {
@@ -894,20 +910,40 @@ out:
894/* 910/*
895 * Choose an appropriate source address (RFC3484) 911 * Choose an appropriate source address (RFC3484)
896 */ 912 */
913enum {
914 IPV6_SADDR_RULE_INIT = 0,
915 IPV6_SADDR_RULE_LOCAL,
916 IPV6_SADDR_RULE_SCOPE,
917 IPV6_SADDR_RULE_PREFERRED,
918#ifdef CONFIG_IPV6_MIP6
919 IPV6_SADDR_RULE_HOA,
920#endif
921 IPV6_SADDR_RULE_OIF,
922 IPV6_SADDR_RULE_LABEL,
923#ifdef CONFIG_IPV6_PRIVACY
924 IPV6_SADDR_RULE_PRIVACY,
925#endif
926 IPV6_SADDR_RULE_ORCHID,
927 IPV6_SADDR_RULE_PREFIX,
928 IPV6_SADDR_RULE_MAX
929};
930
897struct ipv6_saddr_score { 931struct ipv6_saddr_score {
898 int addr_type; 932 int rule;
899 unsigned int attrs; 933 int addr_type;
900 int matchlen; 934 struct inet6_ifaddr *ifa;
901 int scope; 935 DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX);
902 unsigned int rule; 936 int scopedist;
937 int matchlen;
903}; 938};
904 939
905#define IPV6_SADDR_SCORE_LOCAL 0x0001 940struct ipv6_saddr_dst {
906#define IPV6_SADDR_SCORE_PREFERRED 0x0004 941 const struct in6_addr *addr;
907#define IPV6_SADDR_SCORE_HOA 0x0008 942 int ifindex;
908#define IPV6_SADDR_SCORE_OIF 0x0010 943 int scope;
909#define IPV6_SADDR_SCORE_LABEL 0x0020 944 int label;
910#define IPV6_SADDR_SCORE_PRIVACY 0x0040 945 unsigned int prefs;
946};
911 947
912static inline int ipv6_saddr_preferred(int type) 948static inline int ipv6_saddr_preferred(int type)
913{ 949{
@@ -917,27 +953,152 @@ static inline int ipv6_saddr_preferred(int type)
917 return 0; 953 return 0;
918} 954}
919 955
920int ipv6_dev_get_saddr(struct net_device *daddr_dev, 956static int ipv6_get_saddr_eval(struct ipv6_saddr_score *score,
921 struct in6_addr *daddr, struct in6_addr *saddr) 957 struct ipv6_saddr_dst *dst,
958 int i)
922{ 959{
923 struct ipv6_saddr_score hiscore; 960 int ret;
924 struct inet6_ifaddr *ifa_result = NULL; 961
925 int daddr_type = __ipv6_addr_type(daddr); 962 if (i <= score->rule) {
926 int daddr_scope = __ipv6_addr_src_scope(daddr_type); 963 switch (i) {
927 int daddr_ifindex = daddr_dev ? daddr_dev->ifindex : 0; 964 case IPV6_SADDR_RULE_SCOPE:
928 u32 daddr_label = ipv6_addr_label(daddr, daddr_type, daddr_ifindex); 965 ret = score->scopedist;
966 break;
967 case IPV6_SADDR_RULE_PREFIX:
968 ret = score->matchlen;
969 break;
970 default:
971 ret = !!test_bit(i, score->scorebits);
972 }
973 goto out;
974 }
975
976 switch (i) {
977 case IPV6_SADDR_RULE_INIT:
978 /* Rule 0: remember if hiscore is not ready yet */
979 ret = !!score->ifa;
980 break;
981 case IPV6_SADDR_RULE_LOCAL:
982 /* Rule 1: Prefer same address */
983 ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
984 break;
985 case IPV6_SADDR_RULE_SCOPE:
986 /* Rule 2: Prefer appropriate scope
987 *
988 * ret
989 * ^
990 * -1 | d 15
991 * ---+--+-+---> scope
992 * |
993 * | d is scope of the destination.
994 * B-d | \
995 * | \ <- smaller scope is better if
996 * B-15 | \ if scope is enough for destinaion.
997 * | ret = B - scope (-1 <= scope >= d <= 15).
998 * d-C-1 | /
999 * |/ <- greater is better
1000 * -C / if scope is not enough for destination.
1001 * /| ret = scope - C (-1 <= d < scope <= 15).
1002 *
1003 * d - C - 1 < B -15 (for all -1 <= d <= 15).
1004 * C > d + 14 - B >= 15 + 14 - B = 29 - B.
1005 * Assume B = 0 and we get C > 29.
1006 */
1007 ret = __ipv6_addr_src_scope(score->addr_type);
1008 if (ret >= dst->scope)
1009 ret = -ret;
1010 else
1011 ret -= 128; /* 30 is enough */
1012 score->scopedist = ret;
1013 break;
1014 case IPV6_SADDR_RULE_PREFERRED:
1015 /* Rule 3: Avoid deprecated and optimistic addresses */
1016 ret = ipv6_saddr_preferred(score->addr_type) ||
1017 !(score->ifa->flags & (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC));
1018 break;
1019#ifdef CONFIG_IPV6_MIP6
1020 case IPV6_SADDR_RULE_HOA:
1021 {
1022 /* Rule 4: Prefer home address */
1023 int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
1024 ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
1025 break;
1026 }
1027#endif
1028 case IPV6_SADDR_RULE_OIF:
1029 /* Rule 5: Prefer outgoing interface */
1030 ret = (!dst->ifindex ||
1031 dst->ifindex == score->ifa->idev->dev->ifindex);
1032 break;
1033 case IPV6_SADDR_RULE_LABEL:
1034 /* Rule 6: Prefer matching label */
1035 ret = ipv6_addr_label(&score->ifa->addr, score->addr_type,
1036 score->ifa->idev->dev->ifindex) == dst->label;
1037 break;
1038#ifdef CONFIG_IPV6_PRIVACY
1039 case IPV6_SADDR_RULE_PRIVACY:
1040 {
1041 /* Rule 7: Prefer public address
1042 * Note: prefer temprary address if use_tempaddr >= 2
1043 */
1044 int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
1045 !!(dst->prefs & IPV6_PREFER_SRC_TMP) :
1046 score->ifa->idev->cnf.use_tempaddr >= 2;
1047 ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
1048 break;
1049 }
1050#endif
1051 case IPV6_SADDR_RULE_ORCHID:
1052 /* Rule 8-: Prefer ORCHID vs ORCHID or
1053 * non-ORCHID vs non-ORCHID
1054 */
1055 ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
1056 ipv6_addr_orchid(dst->addr));
1057 break;
1058 case IPV6_SADDR_RULE_PREFIX:
1059 /* Rule 8: Use longest matching prefix */
1060 score->matchlen = ret = ipv6_addr_diff(&score->ifa->addr,
1061 dst->addr);
1062 break;
1063 default:
1064 ret = 0;
1065 }
1066
1067 if (ret)
1068 __set_bit(i, score->scorebits);
1069 score->rule = i;
1070out:
1071 return ret;
1072}
1073
1074int ipv6_dev_get_saddr(struct net_device *dst_dev,
1075 const struct in6_addr *daddr, unsigned int prefs,
1076 struct in6_addr *saddr)
1077{
1078 struct ipv6_saddr_score scores[2],
1079 *score = &scores[0], *hiscore = &scores[1];
1080 struct net *net = dev_net(dst_dev);
1081 struct ipv6_saddr_dst dst;
929 struct net_device *dev; 1082 struct net_device *dev;
1083 int dst_type;
1084
1085 dst_type = __ipv6_addr_type(daddr);
1086 dst.addr = daddr;
1087 dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1088 dst.scope = __ipv6_addr_src_scope(dst_type);
1089 dst.label = ipv6_addr_label(daddr, dst_type, dst.ifindex);
1090 dst.prefs = prefs;
930 1091
931 memset(&hiscore, 0, sizeof(hiscore)); 1092 hiscore->rule = -1;
1093 hiscore->ifa = NULL;
932 1094
933 read_lock(&dev_base_lock); 1095 read_lock(&dev_base_lock);
934 rcu_read_lock(); 1096 rcu_read_lock();
935 1097
936 for_each_netdev(&init_net, dev) { 1098 for_each_netdev(net, dev) {
937 struct inet6_dev *idev; 1099 struct inet6_dev *idev;
938 struct inet6_ifaddr *ifa;
939 1100
940 /* Rule 0: Candidate Source Address (section 4) 1101 /* Candidate Source Address (section 4)
941 * - multicast and link-local destination address, 1102 * - multicast and link-local destination address,
942 * the set of candidate source address MUST only 1103 * the set of candidate source address MUST only
943 * include addresses assigned to interfaces 1104 * include addresses assigned to interfaces
@@ -949,9 +1110,9 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
949 * belonging to the same site as the outgoing 1110 * belonging to the same site as the outgoing
950 * interface.) 1111 * interface.)
951 */ 1112 */
952 if ((daddr_type & IPV6_ADDR_MULTICAST || 1113 if (((dst_type & IPV6_ADDR_MULTICAST) ||
953 daddr_scope <= IPV6_ADDR_SCOPE_LINKLOCAL) && 1114 dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL) &&
954 daddr_dev && dev != daddr_dev) 1115 dst.ifindex && dev->ifindex != dst.ifindex)
955 continue; 1116 continue;
956 1117
957 idev = __in6_dev_get(dev); 1118 idev = __in6_dev_get(dev);
@@ -959,12 +1120,10 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
959 continue; 1120 continue;
960 1121
961 read_lock_bh(&idev->lock); 1122 read_lock_bh(&idev->lock);
962 for (ifa = idev->addr_list; ifa; ifa = ifa->if_next) { 1123 for (score->ifa = idev->addr_list; score->ifa; score->ifa = score->ifa->if_next) {
963 struct ipv6_saddr_score score; 1124 int i;
964
965 score.addr_type = __ipv6_addr_type(&ifa->addr);
966 1125
967 /* Rule 0: 1126 /*
968 * - Tentative Address (RFC2462 section 5.4) 1127 * - Tentative Address (RFC2462 section 5.4)
969 * - A tentative address is not considered 1128 * - A tentative address is not considered
970 * "assigned to an interface" in the traditional 1129 * "assigned to an interface" in the traditional
@@ -974,11 +1133,14 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
974 * addresses, and the unspecified address MUST 1133 * addresses, and the unspecified address MUST
975 * NOT be included in a candidate set. 1134 * NOT be included in a candidate set.
976 */ 1135 */
977 if ((ifa->flags & IFA_F_TENTATIVE) && 1136 if ((score->ifa->flags & IFA_F_TENTATIVE) &&
978 (!(ifa->flags & IFA_F_OPTIMISTIC))) 1137 (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
979 continue; 1138 continue;
980 if (unlikely(score.addr_type == IPV6_ADDR_ANY || 1139
981 score.addr_type & IPV6_ADDR_MULTICAST)) { 1140 score->addr_type = __ipv6_addr_type(&score->ifa->addr);
1141
1142 if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
1143 score->addr_type & IPV6_ADDR_MULTICAST)) {
982 LIMIT_NETDEBUG(KERN_DEBUG 1144 LIMIT_NETDEBUG(KERN_DEBUG
983 "ADDRCONF: unspecified / multicast address " 1145 "ADDRCONF: unspecified / multicast address "
984 "assigned as unicast address on %s", 1146 "assigned as unicast address on %s",
@@ -986,207 +1148,63 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
986 continue; 1148 continue;
987 } 1149 }
988 1150
989 score.attrs = 0; 1151 score->rule = -1;
990 score.matchlen = 0; 1152 bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
991 score.scope = 0; 1153
992 score.rule = 0; 1154 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1155 int minihiscore, miniscore;
1156
1157 minihiscore = ipv6_get_saddr_eval(hiscore, &dst, i);
1158 miniscore = ipv6_get_saddr_eval(score, &dst, i);
1159
1160 if (minihiscore > miniscore) {
1161 if (i == IPV6_SADDR_RULE_SCOPE &&
1162 score->scopedist > 0) {
1163 /*
1164 * special case:
1165 * each remaining entry
1166 * has too small (not enough)
1167 * scope, because ifa entries
1168 * are sorted by their scope
1169 * values.
1170 */
1171 goto try_nextdev;
1172 }
1173 break;
1174 } else if (minihiscore < miniscore) {
1175 struct ipv6_saddr_score *tmp;
993 1176
994 if (ifa_result == NULL) { 1177 if (hiscore->ifa)
995 /* record it if the first available entry */ 1178 in6_ifa_put(hiscore->ifa);
996 goto record_it;
997 }
998 1179
999 /* Rule 1: Prefer same address */ 1180 in6_ifa_hold(score->ifa);
1000 if (hiscore.rule < 1) {
1001 if (ipv6_addr_equal(&ifa_result->addr, daddr))
1002 hiscore.attrs |= IPV6_SADDR_SCORE_LOCAL;
1003 hiscore.rule++;
1004 }
1005 if (ipv6_addr_equal(&ifa->addr, daddr)) {
1006 score.attrs |= IPV6_SADDR_SCORE_LOCAL;
1007 if (!(hiscore.attrs & IPV6_SADDR_SCORE_LOCAL)) {
1008 score.rule = 1;
1009 goto record_it;
1010 }
1011 } else {
1012 if (hiscore.attrs & IPV6_SADDR_SCORE_LOCAL)
1013 continue;
1014 }
1015 1181
1016 /* Rule 2: Prefer appropriate scope */ 1182 tmp = hiscore;
1017 if (hiscore.rule < 2) { 1183 hiscore = score;
1018 hiscore.scope = __ipv6_addr_src_scope(hiscore.addr_type); 1184 score = tmp;
1019 hiscore.rule++;
1020 }
1021 score.scope = __ipv6_addr_src_scope(score.addr_type);
1022 if (hiscore.scope < score.scope) {
1023 if (hiscore.scope < daddr_scope) {
1024 score.rule = 2;
1025 goto record_it;
1026 } else
1027 continue;
1028 } else if (score.scope < hiscore.scope) {
1029 if (score.scope < daddr_scope)
1030 break; /* addresses sorted by scope */
1031 else {
1032 score.rule = 2;
1033 goto record_it;
1034 }
1035 }
1036 1185
1037 /* Rule 3: Avoid deprecated and optimistic addresses */ 1186 /* restore our iterator */
1038 if (hiscore.rule < 3) { 1187 score->ifa = hiscore->ifa;
1039 if (ipv6_saddr_preferred(hiscore.addr_type) ||
1040 (((ifa_result->flags &
1041 (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC)) == 0)))
1042 hiscore.attrs |= IPV6_SADDR_SCORE_PREFERRED;
1043 hiscore.rule++;
1044 }
1045 if (ipv6_saddr_preferred(score.addr_type) ||
1046 (((ifa->flags &
1047 (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC)) == 0))) {
1048 score.attrs |= IPV6_SADDR_SCORE_PREFERRED;
1049 if (!(hiscore.attrs & IPV6_SADDR_SCORE_PREFERRED)) {
1050 score.rule = 3;
1051 goto record_it;
1052 }
1053 } else {
1054 if (hiscore.attrs & IPV6_SADDR_SCORE_PREFERRED)
1055 continue;
1056 }
1057 1188
1058 /* Rule 4: Prefer home address */ 1189 break;
1059#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
1060 if (hiscore.rule < 4) {
1061 if (ifa_result->flags & IFA_F_HOMEADDRESS)
1062 hiscore.attrs |= IPV6_SADDR_SCORE_HOA;
1063 hiscore.rule++;
1064 }
1065 if (ifa->flags & IFA_F_HOMEADDRESS) {
1066 score.attrs |= IPV6_SADDR_SCORE_HOA;
1067 if (!(ifa_result->flags & IFA_F_HOMEADDRESS)) {
1068 score.rule = 4;
1069 goto record_it;
1070 }
1071 } else {
1072 if (hiscore.attrs & IPV6_SADDR_SCORE_HOA)
1073 continue;
1074 }
1075#else
1076 if (hiscore.rule < 4)
1077 hiscore.rule++;
1078#endif
1079
1080 /* Rule 5: Prefer outgoing interface */
1081 if (hiscore.rule < 5) {
1082 if (daddr_dev == NULL ||
1083 daddr_dev == ifa_result->idev->dev)
1084 hiscore.attrs |= IPV6_SADDR_SCORE_OIF;
1085 hiscore.rule++;
1086 }
1087 if (daddr_dev == NULL ||
1088 daddr_dev == ifa->idev->dev) {
1089 score.attrs |= IPV6_SADDR_SCORE_OIF;
1090 if (!(hiscore.attrs & IPV6_SADDR_SCORE_OIF)) {
1091 score.rule = 5;
1092 goto record_it;
1093 }
1094 } else {
1095 if (hiscore.attrs & IPV6_SADDR_SCORE_OIF)
1096 continue;
1097 }
1098
1099 /* Rule 6: Prefer matching label */
1100 if (hiscore.rule < 6) {
1101 if (ipv6_addr_label(&ifa_result->addr,
1102 hiscore.addr_type,
1103 ifa_result->idev->dev->ifindex) == daddr_label)
1104 hiscore.attrs |= IPV6_SADDR_SCORE_LABEL;
1105 hiscore.rule++;
1106 }
1107 if (ipv6_addr_label(&ifa->addr,
1108 score.addr_type,
1109 ifa->idev->dev->ifindex) == daddr_label) {
1110 score.attrs |= IPV6_SADDR_SCORE_LABEL;
1111 if (!(hiscore.attrs & IPV6_SADDR_SCORE_LABEL)) {
1112 score.rule = 6;
1113 goto record_it;
1114 }
1115 } else {
1116 if (hiscore.attrs & IPV6_SADDR_SCORE_LABEL)
1117 continue;
1118 }
1119
1120#ifdef CONFIG_IPV6_PRIVACY
1121 /* Rule 7: Prefer public address
1122 * Note: prefer temprary address if use_tempaddr >= 2
1123 */
1124 if (hiscore.rule < 7) {
1125 if ((!(ifa_result->flags & IFA_F_TEMPORARY)) ^
1126 (ifa_result->idev->cnf.use_tempaddr >= 2))
1127 hiscore.attrs |= IPV6_SADDR_SCORE_PRIVACY;
1128 hiscore.rule++;
1129 }
1130 if ((!(ifa->flags & IFA_F_TEMPORARY)) ^
1131 (ifa->idev->cnf.use_tempaddr >= 2)) {
1132 score.attrs |= IPV6_SADDR_SCORE_PRIVACY;
1133 if (!(hiscore.attrs & IPV6_SADDR_SCORE_PRIVACY)) {
1134 score.rule = 7;
1135 goto record_it;
1136 } 1190 }
1137 } else {
1138 if (hiscore.attrs & IPV6_SADDR_SCORE_PRIVACY)
1139 continue;
1140 } 1191 }
1141#else
1142 if (hiscore.rule < 7)
1143 hiscore.rule++;
1144#endif
1145 /* Rule 8: Use longest matching prefix */
1146 if (hiscore.rule < 8) {
1147 hiscore.matchlen = ipv6_addr_diff(&ifa_result->addr, daddr);
1148 hiscore.rule++;
1149 }
1150 score.matchlen = ipv6_addr_diff(&ifa->addr, daddr);
1151 if (score.matchlen > hiscore.matchlen) {
1152 score.rule = 8;
1153 goto record_it;
1154 }
1155#if 0
1156 else if (score.matchlen < hiscore.matchlen)
1157 continue;
1158#endif
1159
1160 /* Final Rule: choose first available one */
1161 continue;
1162record_it:
1163 if (ifa_result)
1164 in6_ifa_put(ifa_result);
1165 in6_ifa_hold(ifa);
1166 ifa_result = ifa;
1167 hiscore = score;
1168 } 1192 }
1193try_nextdev:
1169 read_unlock_bh(&idev->lock); 1194 read_unlock_bh(&idev->lock);
1170 } 1195 }
1171 rcu_read_unlock(); 1196 rcu_read_unlock();
1172 read_unlock(&dev_base_lock); 1197 read_unlock(&dev_base_lock);
1173 1198
1174 if (!ifa_result) 1199 if (!hiscore->ifa)
1175 return -EADDRNOTAVAIL; 1200 return -EADDRNOTAVAIL;
1176 1201
1177 ipv6_addr_copy(saddr, &ifa_result->addr); 1202 ipv6_addr_copy(saddr, &hiscore->ifa->addr);
1178 in6_ifa_put(ifa_result); 1203 in6_ifa_put(hiscore->ifa);
1179 return 0; 1204 return 0;
1180} 1205}
1181 1206
1182 1207EXPORT_SYMBOL(ipv6_dev_get_saddr);
1183int ipv6_get_saddr(struct dst_entry *dst,
1184 struct in6_addr *daddr, struct in6_addr *saddr)
1185{
1186 return ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL, daddr, saddr);
1187}
1188
1189EXPORT_SYMBOL(ipv6_get_saddr);
1190 1208
1191int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, 1209int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1192 unsigned char banned_flags) 1210 unsigned char banned_flags)
@@ -1232,7 +1250,7 @@ int ipv6_chk_addr(struct net *net, struct in6_addr *addr,
1232 1250
1233 read_lock_bh(&addrconf_hash_lock); 1251 read_lock_bh(&addrconf_hash_lock);
1234 for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { 1252 for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
1235 if (ifp->idev->dev->nd_net != net) 1253 if (!net_eq(dev_net(ifp->idev->dev), net))
1236 continue; 1254 continue;
1237 if (ipv6_addr_equal(&ifp->addr, addr) && 1255 if (ipv6_addr_equal(&ifp->addr, addr) &&
1238 !(ifp->flags&IFA_F_TENTATIVE)) { 1256 !(ifp->flags&IFA_F_TENTATIVE)) {
@@ -1254,7 +1272,7 @@ int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1254 u8 hash = ipv6_addr_hash(addr); 1272 u8 hash = ipv6_addr_hash(addr);
1255 1273
1256 for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { 1274 for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
1257 if (ifp->idev->dev->nd_net != net) 1275 if (!net_eq(dev_net(ifp->idev->dev), net))
1258 continue; 1276 continue;
1259 if (ipv6_addr_equal(&ifp->addr, addr)) { 1277 if (ipv6_addr_equal(&ifp->addr, addr)) {
1260 if (dev == NULL || ifp->idev->dev == dev) 1278 if (dev == NULL || ifp->idev->dev == dev)
@@ -1264,7 +1282,32 @@ int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1264 return ifp != NULL; 1282 return ifp != NULL;
1265} 1283}
1266 1284
1267struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, struct in6_addr *addr, 1285int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev)
1286{
1287 struct inet6_dev *idev;
1288 struct inet6_ifaddr *ifa;
1289 int onlink;
1290
1291 onlink = 0;
1292 rcu_read_lock();
1293 idev = __in6_dev_get(dev);
1294 if (idev) {
1295 read_lock_bh(&idev->lock);
1296 for (ifa = idev->addr_list; ifa; ifa = ifa->if_next) {
1297 onlink = ipv6_prefix_equal(addr, &ifa->addr,
1298 ifa->prefix_len);
1299 if (onlink)
1300 break;
1301 }
1302 read_unlock_bh(&idev->lock);
1303 }
1304 rcu_read_unlock();
1305 return onlink;
1306}
1307
1308EXPORT_SYMBOL(ipv6_chk_prefix);
1309
1310struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
1268 struct net_device *dev, int strict) 1311 struct net_device *dev, int strict)
1269{ 1312{
1270 struct inet6_ifaddr * ifp; 1313 struct inet6_ifaddr * ifp;
@@ -1272,7 +1315,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, struct in6_addr *addr,
1272 1315
1273 read_lock_bh(&addrconf_hash_lock); 1316 read_lock_bh(&addrconf_hash_lock);
1274 for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { 1317 for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
1275 if (ifp->idev->dev->nd_net != net) 1318 if (!net_eq(dev_net(ifp->idev->dev), net))
1276 continue; 1319 continue;
1277 if (ipv6_addr_equal(&ifp->addr, addr)) { 1320 if (ipv6_addr_equal(&ifp->addr, addr)) {
1278 if (dev == NULL || ifp->idev->dev == dev || 1321 if (dev == NULL || ifp->idev->dev == dev ||
@@ -1449,6 +1492,29 @@ static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
1449 return 0; 1492 return 0;
1450} 1493}
1451 1494
1495int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
1496{
1497 eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
1498 ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
1499 ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
1500 ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
1501 ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
1502 ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
1503 eui[1] = 0;
1504 eui[2] = 0x5E;
1505 eui[3] = 0xFE;
1506 memcpy(eui + 4, &addr, 4);
1507 return 0;
1508}
1509EXPORT_SYMBOL(__ipv6_isatap_ifid);
1510
1511static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
1512{
1513 if (dev->priv_flags & IFF_ISATAP)
1514 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
1515 return -1;
1516}
1517
1452static int ipv6_generate_eui64(u8 *eui, struct net_device *dev) 1518static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
1453{ 1519{
1454 switch (dev->type) { 1520 switch (dev->type) {
@@ -1461,8 +1527,7 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
1461 case ARPHRD_INFINIBAND: 1527 case ARPHRD_INFINIBAND:
1462 return addrconf_ifid_infiniband(eui, dev); 1528 return addrconf_ifid_infiniband(eui, dev);
1463 case ARPHRD_SIT: 1529 case ARPHRD_SIT:
1464 if (dev->priv_flags & IFF_ISATAP) 1530 return addrconf_ifid_sit(eui, dev);
1465 return ipv6_isatap_eui64(eui, *(__be32 *)dev->dev_addr);
1466 } 1531 }
1467 return -1; 1532 return -1;
1468} 1533}
@@ -1574,7 +1639,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
1574 .fc_expires = expires, 1639 .fc_expires = expires,
1575 .fc_dst_len = plen, 1640 .fc_dst_len = plen,
1576 .fc_flags = RTF_UP | flags, 1641 .fc_flags = RTF_UP | flags,
1577 .fc_nlinfo.nl_net = &init_net, 1642 .fc_nlinfo.nl_net = dev_net(dev),
1578 }; 1643 };
1579 1644
1580 ipv6_addr_copy(&cfg.fc_dst, pfx); 1645 ipv6_addr_copy(&cfg.fc_dst, pfx);
@@ -1601,7 +1666,7 @@ static void addrconf_add_mroute(struct net_device *dev)
1601 .fc_ifindex = dev->ifindex, 1666 .fc_ifindex = dev->ifindex,
1602 .fc_dst_len = 8, 1667 .fc_dst_len = 8,
1603 .fc_flags = RTF_UP, 1668 .fc_flags = RTF_UP,
1604 .fc_nlinfo.nl_net = &init_net, 1669 .fc_nlinfo.nl_net = dev_net(dev),
1605 }; 1670 };
1606 1671
1607 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0); 1672 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
@@ -1618,7 +1683,7 @@ static void sit_route_add(struct net_device *dev)
1618 .fc_ifindex = dev->ifindex, 1683 .fc_ifindex = dev->ifindex,
1619 .fc_dst_len = 96, 1684 .fc_dst_len = 96,
1620 .fc_flags = RTF_UP | RTF_NONEXTHOP, 1685 .fc_flags = RTF_UP | RTF_NONEXTHOP,
1621 .fc_nlinfo.nl_net = &init_net, 1686 .fc_nlinfo.nl_net = dev_net(dev),
1622 }; 1687 };
1623 1688
1624 /* prefix length - 96 bits "::d.d.d.d" */ 1689 /* prefix length - 96 bits "::d.d.d.d" */
@@ -1719,7 +1784,8 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1719 1784
1720 if (pinfo->onlink) { 1785 if (pinfo->onlink) {
1721 struct rt6_info *rt; 1786 struct rt6_info *rt;
1722 rt = rt6_lookup(&pinfo->prefix, NULL, dev->ifindex, 1); 1787 rt = rt6_lookup(dev_net(dev), &pinfo->prefix, NULL,
1788 dev->ifindex, 1);
1723 1789
1724 if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) { 1790 if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) {
1725 if (rt->rt6i_flags&RTF_EXPIRES) { 1791 if (rt->rt6i_flags&RTF_EXPIRES) {
@@ -1762,7 +1828,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1762 1828
1763ok: 1829ok:
1764 1830
1765 ifp = ipv6_get_ifaddr(&init_net, &addr, dev, 1); 1831 ifp = ipv6_get_ifaddr(dev_net(dev), &addr, dev, 1);
1766 1832
1767 if (ifp == NULL && valid_lft) { 1833 if (ifp == NULL && valid_lft) {
1768 int max_addresses = in6_dev->cnf.max_addresses; 1834 int max_addresses = in6_dev->cnf.max_addresses;
@@ -1888,7 +1954,7 @@ ok:
1888 * Special case for SIT interfaces where we create a new "virtual" 1954 * Special case for SIT interfaces where we create a new "virtual"
1889 * device. 1955 * device.
1890 */ 1956 */
1891int addrconf_set_dstaddr(void __user *arg) 1957int addrconf_set_dstaddr(struct net *net, void __user *arg)
1892{ 1958{
1893 struct in6_ifreq ireq; 1959 struct in6_ifreq ireq;
1894 struct net_device *dev; 1960 struct net_device *dev;
@@ -1900,7 +1966,7 @@ int addrconf_set_dstaddr(void __user *arg)
1900 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) 1966 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
1901 goto err_exit; 1967 goto err_exit;
1902 1968
1903 dev = __dev_get_by_index(&init_net, ireq.ifr6_ifindex); 1969 dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
1904 1970
1905 err = -ENODEV; 1971 err = -ENODEV;
1906 if (dev == NULL) 1972 if (dev == NULL)
@@ -1931,7 +1997,8 @@ int addrconf_set_dstaddr(void __user *arg)
1931 1997
1932 if (err == 0) { 1998 if (err == 0) {
1933 err = -ENOBUFS; 1999 err = -ENOBUFS;
1934 if ((dev = __dev_get_by_name(&init_net, p.name)) == NULL) 2000 dev = __dev_get_by_name(net, p.name);
2001 if (!dev)
1935 goto err_exit; 2002 goto err_exit;
1936 err = dev_open(dev); 2003 err = dev_open(dev);
1937 } 2004 }
@@ -1946,8 +2013,9 @@ err_exit:
1946/* 2013/*
1947 * Manual configuration of address on an interface 2014 * Manual configuration of address on an interface
1948 */ 2015 */
1949static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen, 2016static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
1950 __u8 ifa_flags, __u32 prefered_lft, __u32 valid_lft) 2017 int plen, __u8 ifa_flags, __u32 prefered_lft,
2018 __u32 valid_lft)
1951{ 2019{
1952 struct inet6_ifaddr *ifp; 2020 struct inet6_ifaddr *ifp;
1953 struct inet6_dev *idev; 2021 struct inet6_dev *idev;
@@ -1961,7 +2029,8 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen,
1961 if (!valid_lft || prefered_lft > valid_lft) 2029 if (!valid_lft || prefered_lft > valid_lft)
1962 return -EINVAL; 2030 return -EINVAL;
1963 2031
1964 if ((dev = __dev_get_by_index(&init_net, ifindex)) == NULL) 2032 dev = __dev_get_by_index(net, ifindex);
2033 if (!dev)
1965 return -ENODEV; 2034 return -ENODEV;
1966 2035
1967 if ((idev = addrconf_add_dev(dev)) == NULL) 2036 if ((idev = addrconf_add_dev(dev)) == NULL)
@@ -2006,13 +2075,15 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen,
2006 return PTR_ERR(ifp); 2075 return PTR_ERR(ifp);
2007} 2076}
2008 2077
2009static int inet6_addr_del(int ifindex, struct in6_addr *pfx, int plen) 2078static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx,
2079 int plen)
2010{ 2080{
2011 struct inet6_ifaddr *ifp; 2081 struct inet6_ifaddr *ifp;
2012 struct inet6_dev *idev; 2082 struct inet6_dev *idev;
2013 struct net_device *dev; 2083 struct net_device *dev;
2014 2084
2015 if ((dev = __dev_get_by_index(&init_net, ifindex)) == NULL) 2085 dev = __dev_get_by_index(net, ifindex);
2086 if (!dev)
2016 return -ENODEV; 2087 return -ENODEV;
2017 2088
2018 if ((idev = __in6_dev_get(dev)) == NULL) 2089 if ((idev = __in6_dev_get(dev)) == NULL)
@@ -2040,7 +2111,7 @@ static int inet6_addr_del(int ifindex, struct in6_addr *pfx, int plen)
2040} 2111}
2041 2112
2042 2113
2043int addrconf_add_ifaddr(void __user *arg) 2114int addrconf_add_ifaddr(struct net *net, void __user *arg)
2044{ 2115{
2045 struct in6_ifreq ireq; 2116 struct in6_ifreq ireq;
2046 int err; 2117 int err;
@@ -2052,13 +2123,14 @@ int addrconf_add_ifaddr(void __user *arg)
2052 return -EFAULT; 2123 return -EFAULT;
2053 2124
2054 rtnl_lock(); 2125 rtnl_lock();
2055 err = inet6_addr_add(ireq.ifr6_ifindex, &ireq.ifr6_addr, ireq.ifr6_prefixlen, 2126 err = inet6_addr_add(net, ireq.ifr6_ifindex, &ireq.ifr6_addr,
2056 IFA_F_PERMANENT, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME); 2127 ireq.ifr6_prefixlen, IFA_F_PERMANENT,
2128 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
2057 rtnl_unlock(); 2129 rtnl_unlock();
2058 return err; 2130 return err;
2059} 2131}
2060 2132
2061int addrconf_del_ifaddr(void __user *arg) 2133int addrconf_del_ifaddr(struct net *net, void __user *arg)
2062{ 2134{
2063 struct in6_ifreq ireq; 2135 struct in6_ifreq ireq;
2064 int err; 2136 int err;
@@ -2070,7 +2142,8 @@ int addrconf_del_ifaddr(void __user *arg)
2070 return -EFAULT; 2142 return -EFAULT;
2071 2143
2072 rtnl_lock(); 2144 rtnl_lock();
2073 err = inet6_addr_del(ireq.ifr6_ifindex, &ireq.ifr6_addr, ireq.ifr6_prefixlen); 2145 err = inet6_addr_del(net, ireq.ifr6_ifindex, &ireq.ifr6_addr,
2146 ireq.ifr6_prefixlen);
2074 rtnl_unlock(); 2147 rtnl_unlock();
2075 return err; 2148 return err;
2076} 2149}
@@ -2081,6 +2154,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
2081 struct inet6_ifaddr * ifp; 2154 struct inet6_ifaddr * ifp;
2082 struct in6_addr addr; 2155 struct in6_addr addr;
2083 struct net_device *dev; 2156 struct net_device *dev;
2157 struct net *net = dev_net(idev->dev);
2084 int scope; 2158 int scope;
2085 2159
2086 ASSERT_RTNL(); 2160 ASSERT_RTNL();
@@ -2107,7 +2181,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
2107 return; 2181 return;
2108 } 2182 }
2109 2183
2110 for_each_netdev(&init_net, dev) { 2184 for_each_netdev(net, dev) {
2111 struct in_device * in_dev = __in_dev_get_rtnl(dev); 2185 struct in_device * in_dev = __in_dev_get_rtnl(dev);
2112 if (in_dev && (dev->flags & IFF_UP)) { 2186 if (in_dev && (dev->flags & IFF_UP)) {
2113 struct in_ifaddr * ifa; 2187 struct in_ifaddr * ifa;
@@ -2270,15 +2344,16 @@ ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev)
2270static void ip6_tnl_add_linklocal(struct inet6_dev *idev) 2344static void ip6_tnl_add_linklocal(struct inet6_dev *idev)
2271{ 2345{
2272 struct net_device *link_dev; 2346 struct net_device *link_dev;
2347 struct net *net = dev_net(idev->dev);
2273 2348
2274 /* first try to inherit the link-local address from the link device */ 2349 /* first try to inherit the link-local address from the link device */
2275 if (idev->dev->iflink && 2350 if (idev->dev->iflink &&
2276 (link_dev = __dev_get_by_index(&init_net, idev->dev->iflink))) { 2351 (link_dev = __dev_get_by_index(net, idev->dev->iflink))) {
2277 if (!ipv6_inherit_linklocal(idev, link_dev)) 2352 if (!ipv6_inherit_linklocal(idev, link_dev))
2278 return; 2353 return;
2279 } 2354 }
2280 /* then try to inherit it from any device */ 2355 /* then try to inherit it from any device */
2281 for_each_netdev(&init_net, link_dev) { 2356 for_each_netdev(net, link_dev) {
2282 if (!ipv6_inherit_linklocal(idev, link_dev)) 2357 if (!ipv6_inherit_linklocal(idev, link_dev))
2283 return; 2358 return;
2284 } 2359 }
@@ -2311,9 +2386,6 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2311 int run_pending = 0; 2386 int run_pending = 0;
2312 int err; 2387 int err;
2313 2388
2314 if (dev->nd_net != &init_net)
2315 return NOTIFY_DONE;
2316
2317 switch(event) { 2389 switch(event) {
2318 case NETDEV_REGISTER: 2390 case NETDEV_REGISTER:
2319 if (!idev && dev->mtu >= IPV6_MIN_MTU) { 2391 if (!idev && dev->mtu >= IPV6_MIN_MTU) {
@@ -2453,6 +2525,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2453{ 2525{
2454 struct inet6_dev *idev; 2526 struct inet6_dev *idev;
2455 struct inet6_ifaddr *ifa, **bifa; 2527 struct inet6_ifaddr *ifa, **bifa;
2528 struct net *net = dev_net(dev);
2456 int i; 2529 int i;
2457 2530
2458 ASSERT_RTNL(); 2531 ASSERT_RTNL();
@@ -2460,7 +2533,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2460 if ((dev->flags & IFF_LOOPBACK) && how == 1) 2533 if ((dev->flags & IFF_LOOPBACK) && how == 1)
2461 how = 0; 2534 how = 0;
2462 2535
2463 rt6_ifdown(dev); 2536 rt6_ifdown(net, dev);
2464 neigh_ifdown(&nd_tbl, dev); 2537 neigh_ifdown(&nd_tbl, dev);
2465 2538
2466 idev = __in6_dev_get(dev); 2539 idev = __in6_dev_get(dev);
@@ -2579,8 +2652,6 @@ static void addrconf_rs_timer(unsigned long data)
2579 2652
2580 spin_lock(&ifp->lock); 2653 spin_lock(&ifp->lock);
2581 if (ifp->probes++ < ifp->idev->cnf.rtr_solicits) { 2654 if (ifp->probes++ < ifp->idev->cnf.rtr_solicits) {
2582 struct in6_addr all_routers;
2583
2584 /* The wait after the last probe can be shorter */ 2655 /* The wait after the last probe can be shorter */
2585 addrconf_mod_timer(ifp, AC_RS, 2656 addrconf_mod_timer(ifp, AC_RS,
2586 (ifp->probes == ifp->idev->cnf.rtr_solicits) ? 2657 (ifp->probes == ifp->idev->cnf.rtr_solicits) ?
@@ -2588,9 +2659,7 @@ static void addrconf_rs_timer(unsigned long data)
2588 ifp->idev->cnf.rtr_solicit_interval); 2659 ifp->idev->cnf.rtr_solicit_interval);
2589 spin_unlock(&ifp->lock); 2660 spin_unlock(&ifp->lock);
2590 2661
2591 ipv6_addr_all_routers(&all_routers); 2662 ndisc_send_rs(ifp->idev->dev, &ifp->addr, &in6addr_linklocal_allrouters);
2592
2593 ndisc_send_rs(ifp->idev->dev, &ifp->addr, &all_routers);
2594 } else { 2663 } else {
2595 spin_unlock(&ifp->lock); 2664 spin_unlock(&ifp->lock);
2596 /* 2665 /*
@@ -2677,7 +2746,6 @@ static void addrconf_dad_timer(unsigned long data)
2677{ 2746{
2678 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data; 2747 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data;
2679 struct inet6_dev *idev = ifp->idev; 2748 struct inet6_dev *idev = ifp->idev;
2680 struct in6_addr unspec;
2681 struct in6_addr mcaddr; 2749 struct in6_addr mcaddr;
2682 2750
2683 read_lock_bh(&idev->lock); 2751 read_lock_bh(&idev->lock);
@@ -2706,9 +2774,8 @@ static void addrconf_dad_timer(unsigned long data)
2706 read_unlock_bh(&idev->lock); 2774 read_unlock_bh(&idev->lock);
2707 2775
2708 /* send a neighbour solicitation for our addr */ 2776 /* send a neighbour solicitation for our addr */
2709 memset(&unspec, 0, sizeof(unspec));
2710 addrconf_addr_solict_mult(&ifp->addr, &mcaddr); 2777 addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
2711 ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &unspec); 2778 ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any);
2712out: 2779out:
2713 in6_ifa_put(ifp); 2780 in6_ifa_put(ifp);
2714} 2781}
@@ -2731,16 +2798,12 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
2731 ifp->idev->cnf.rtr_solicits > 0 && 2798 ifp->idev->cnf.rtr_solicits > 0 &&
2732 (dev->flags&IFF_LOOPBACK) == 0 && 2799 (dev->flags&IFF_LOOPBACK) == 0 &&
2733 (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) { 2800 (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
2734 struct in6_addr all_routers;
2735
2736 ipv6_addr_all_routers(&all_routers);
2737
2738 /* 2801 /*
2739 * If a host as already performed a random delay 2802 * If a host as already performed a random delay
2740 * [...] as part of DAD [...] there is no need 2803 * [...] as part of DAD [...] there is no need
2741 * to delay again before sending the first RS 2804 * to delay again before sending the first RS
2742 */ 2805 */
2743 ndisc_send_rs(ifp->idev->dev, &ifp->addr, &all_routers); 2806 ndisc_send_rs(ifp->idev->dev, &ifp->addr, &in6addr_linklocal_allrouters);
2744 2807
2745 spin_lock_bh(&ifp->lock); 2808 spin_lock_bh(&ifp->lock);
2746 ifp->probes = 1; 2809 ifp->probes = 1;
@@ -2776,12 +2839,12 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq)
2776{ 2839{
2777 struct inet6_ifaddr *ifa = NULL; 2840 struct inet6_ifaddr *ifa = NULL;
2778 struct if6_iter_state *state = seq->private; 2841 struct if6_iter_state *state = seq->private;
2779 struct net *net = state->p.net; 2842 struct net *net = seq_file_net(seq);
2780 2843
2781 for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { 2844 for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
2782 ifa = inet6_addr_lst[state->bucket]; 2845 ifa = inet6_addr_lst[state->bucket];
2783 2846
2784 while (ifa && ifa->idev->dev->nd_net != net) 2847 while (ifa && !net_eq(dev_net(ifa->idev->dev), net))
2785 ifa = ifa->lst_next; 2848 ifa = ifa->lst_next;
2786 if (ifa) 2849 if (ifa)
2787 break; 2850 break;
@@ -2792,12 +2855,12 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq)
2792static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, struct inet6_ifaddr *ifa) 2855static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, struct inet6_ifaddr *ifa)
2793{ 2856{
2794 struct if6_iter_state *state = seq->private; 2857 struct if6_iter_state *state = seq->private;
2795 struct net *net = state->p.net; 2858 struct net *net = seq_file_net(seq);
2796 2859
2797 ifa = ifa->lst_next; 2860 ifa = ifa->lst_next;
2798try_again: 2861try_again:
2799 if (ifa) { 2862 if (ifa) {
2800 if (ifa->idev->dev->nd_net != net) { 2863 if (!net_eq(dev_net(ifa->idev->dev), net)) {
2801 ifa = ifa->lst_next; 2864 ifa = ifa->lst_next;
2802 goto try_again; 2865 goto try_again;
2803 } 2866 }
@@ -2915,9 +2978,9 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
2915 u8 hash = ipv6_addr_hash(addr); 2978 u8 hash = ipv6_addr_hash(addr);
2916 read_lock_bh(&addrconf_hash_lock); 2979 read_lock_bh(&addrconf_hash_lock);
2917 for (ifp = inet6_addr_lst[hash]; ifp; ifp = ifp->lst_next) { 2980 for (ifp = inet6_addr_lst[hash]; ifp; ifp = ifp->lst_next) {
2918 if (ifp->idev->dev->nd_net != net) 2981 if (!net_eq(dev_net(ifp->idev->dev), net))
2919 continue; 2982 continue;
2920 if (ipv6_addr_cmp(&ifp->addr, addr) == 0 && 2983 if (ipv6_addr_equal(&ifp->addr, addr) &&
2921 (ifp->flags & IFA_F_HOMEADDRESS)) { 2984 (ifp->flags & IFA_F_HOMEADDRESS)) {
2922 ret = 1; 2985 ret = 1;
2923 break; 2986 break;
@@ -3064,15 +3127,12 @@ static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
3064static int 3127static int
3065inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 3128inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
3066{ 3129{
3067 struct net *net = skb->sk->sk_net; 3130 struct net *net = sock_net(skb->sk);
3068 struct ifaddrmsg *ifm; 3131 struct ifaddrmsg *ifm;
3069 struct nlattr *tb[IFA_MAX+1]; 3132 struct nlattr *tb[IFA_MAX+1];
3070 struct in6_addr *pfx; 3133 struct in6_addr *pfx;
3071 int err; 3134 int err;
3072 3135
3073 if (net != &init_net)
3074 return -EINVAL;
3075
3076 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy); 3136 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
3077 if (err < 0) 3137 if (err < 0)
3078 return err; 3138 return err;
@@ -3082,7 +3142,7 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
3082 if (pfx == NULL) 3142 if (pfx == NULL)
3083 return -EINVAL; 3143 return -EINVAL;
3084 3144
3085 return inet6_addr_del(ifm->ifa_index, pfx, ifm->ifa_prefixlen); 3145 return inet6_addr_del(net, ifm->ifa_index, pfx, ifm->ifa_prefixlen);
3086} 3146}
3087 3147
3088static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags, 3148static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
@@ -3125,7 +3185,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
3125static int 3185static int
3126inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 3186inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
3127{ 3187{
3128 struct net *net = skb->sk->sk_net; 3188 struct net *net = sock_net(skb->sk);
3129 struct ifaddrmsg *ifm; 3189 struct ifaddrmsg *ifm;
3130 struct nlattr *tb[IFA_MAX+1]; 3190 struct nlattr *tb[IFA_MAX+1];
3131 struct in6_addr *pfx; 3191 struct in6_addr *pfx;
@@ -3135,9 +3195,6 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
3135 u8 ifa_flags; 3195 u8 ifa_flags;
3136 int err; 3196 int err;
3137 3197
3138 if (net != &init_net)
3139 return -EINVAL;
3140
3141 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy); 3198 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
3142 if (err < 0) 3199 if (err < 0)
3143 return err; 3200 return err;
@@ -3158,7 +3215,7 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
3158 valid_lft = INFINITY_LIFE_TIME; 3215 valid_lft = INFINITY_LIFE_TIME;
3159 } 3216 }
3160 3217
3161 dev = __dev_get_by_index(&init_net, ifm->ifa_index); 3218 dev = __dev_get_by_index(net, ifm->ifa_index);
3162 if (dev == NULL) 3219 if (dev == NULL)
3163 return -ENODEV; 3220 return -ENODEV;
3164 3221
@@ -3171,8 +3228,9 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
3171 * It would be best to check for !NLM_F_CREATE here but 3228 * It would be best to check for !NLM_F_CREATE here but
3172 * userspace alreay relies on not having to provide this. 3229 * userspace alreay relies on not having to provide this.
3173 */ 3230 */
3174 return inet6_addr_add(ifm->ifa_index, pfx, ifm->ifa_prefixlen, 3231 return inet6_addr_add(net, ifm->ifa_index, pfx,
3175 ifa_flags, preferred_lft, valid_lft); 3232 ifm->ifa_prefixlen, ifa_flags,
3233 preferred_lft, valid_lft);
3176 } 3234 }
3177 3235
3178 if (nlh->nlmsg_flags & NLM_F_EXCL || 3236 if (nlh->nlmsg_flags & NLM_F_EXCL ||
@@ -3337,12 +3395,13 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
3337 struct inet6_ifaddr *ifa; 3395 struct inet6_ifaddr *ifa;
3338 struct ifmcaddr6 *ifmca; 3396 struct ifmcaddr6 *ifmca;
3339 struct ifacaddr6 *ifaca; 3397 struct ifacaddr6 *ifaca;
3398 struct net *net = sock_net(skb->sk);
3340 3399
3341 s_idx = cb->args[0]; 3400 s_idx = cb->args[0];
3342 s_ip_idx = ip_idx = cb->args[1]; 3401 s_ip_idx = ip_idx = cb->args[1];
3343 3402
3344 idx = 0; 3403 idx = 0;
3345 for_each_netdev(&init_net, dev) { 3404 for_each_netdev(net, dev) {
3346 if (idx < s_idx) 3405 if (idx < s_idx)
3347 goto cont; 3406 goto cont;
3348 if (idx > s_idx) 3407 if (idx > s_idx)
@@ -3409,42 +3468,30 @@ cont:
3409 3468
3410static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) 3469static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
3411{ 3470{
3412 struct net *net = skb->sk->sk_net;
3413 enum addr_type_t type = UNICAST_ADDR; 3471 enum addr_type_t type = UNICAST_ADDR;
3414 3472
3415 if (net != &init_net)
3416 return 0;
3417
3418 return inet6_dump_addr(skb, cb, type); 3473 return inet6_dump_addr(skb, cb, type);
3419} 3474}
3420 3475
3421static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb) 3476static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
3422{ 3477{
3423 struct net *net = skb->sk->sk_net;
3424 enum addr_type_t type = MULTICAST_ADDR; 3478 enum addr_type_t type = MULTICAST_ADDR;
3425 3479
3426 if (net != &init_net)
3427 return 0;
3428
3429 return inet6_dump_addr(skb, cb, type); 3480 return inet6_dump_addr(skb, cb, type);
3430} 3481}
3431 3482
3432 3483
3433static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb) 3484static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
3434{ 3485{
3435 struct net *net = skb->sk->sk_net;
3436 enum addr_type_t type = ANYCAST_ADDR; 3486 enum addr_type_t type = ANYCAST_ADDR;
3437 3487
3438 if (net != &init_net)
3439 return 0;
3440
3441 return inet6_dump_addr(skb, cb, type); 3488 return inet6_dump_addr(skb, cb, type);
3442} 3489}
3443 3490
3444static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh, 3491static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh,
3445 void *arg) 3492 void *arg)
3446{ 3493{
3447 struct net *net = in_skb->sk->sk_net; 3494 struct net *net = sock_net(in_skb->sk);
3448 struct ifaddrmsg *ifm; 3495 struct ifaddrmsg *ifm;
3449 struct nlattr *tb[IFA_MAX+1]; 3496 struct nlattr *tb[IFA_MAX+1];
3450 struct in6_addr *addr = NULL; 3497 struct in6_addr *addr = NULL;
@@ -3453,9 +3500,6 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh,
3453 struct sk_buff *skb; 3500 struct sk_buff *skb;
3454 int err; 3501 int err;
3455 3502
3456 if (net != &init_net)
3457 return -EINVAL;
3458
3459 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy); 3503 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
3460 if (err < 0) 3504 if (err < 0)
3461 goto errout; 3505 goto errout;
@@ -3468,7 +3512,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh,
3468 3512
3469 ifm = nlmsg_data(nlh); 3513 ifm = nlmsg_data(nlh);
3470 if (ifm->ifa_index) 3514 if (ifm->ifa_index)
3471 dev = __dev_get_by_index(&init_net, ifm->ifa_index); 3515 dev = __dev_get_by_index(net, ifm->ifa_index);
3472 3516
3473 if ((ifa = ipv6_get_ifaddr(net, addr, dev, 1)) == NULL) { 3517 if ((ifa = ipv6_get_ifaddr(net, addr, dev, 1)) == NULL) {
3474 err = -EADDRNOTAVAIL; 3518 err = -EADDRNOTAVAIL;
@@ -3488,7 +3532,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh,
3488 kfree_skb(skb); 3532 kfree_skb(skb);
3489 goto errout_ifa; 3533 goto errout_ifa;
3490 } 3534 }
3491 err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid); 3535 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
3492errout_ifa: 3536errout_ifa:
3493 in6_ifa_put(ifa); 3537 in6_ifa_put(ifa);
3494errout: 3538errout:
@@ -3498,6 +3542,7 @@ errout:
3498static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa) 3542static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
3499{ 3543{
3500 struct sk_buff *skb; 3544 struct sk_buff *skb;
3545 struct net *net = dev_net(ifa->idev->dev);
3501 int err = -ENOBUFS; 3546 int err = -ENOBUFS;
3502 3547
3503 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC); 3548 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
@@ -3511,10 +3556,10 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
3511 kfree_skb(skb); 3556 kfree_skb(skb);
3512 goto errout; 3557 goto errout;
3513 } 3558 }
3514 err = rtnl_notify(skb, &init_net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); 3559 err = rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
3515errout: 3560errout:
3516 if (err < 0) 3561 if (err < 0)
3517 rtnl_set_sk_err(&init_net, RTNLGRP_IPV6_IFADDR, err); 3562 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
3518} 3563}
3519 3564
3520static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, 3565static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
@@ -3556,6 +3601,9 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
3556#ifdef CONFIG_IPV6_OPTIMISTIC_DAD 3601#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
3557 array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad; 3602 array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
3558#endif 3603#endif
3604#ifdef CONFIG_IPV6_MROUTE
3605 array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
3606#endif
3559} 3607}
3560 3608
3561static inline size_t inet6_if_nlmsg_size(void) 3609static inline size_t inet6_if_nlmsg_size(void)
@@ -3673,18 +3721,15 @@ nla_put_failure:
3673 3721
3674static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 3722static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
3675{ 3723{
3676 struct net *net = skb->sk->sk_net; 3724 struct net *net = sock_net(skb->sk);
3677 int idx, err; 3725 int idx, err;
3678 int s_idx = cb->args[0]; 3726 int s_idx = cb->args[0];
3679 struct net_device *dev; 3727 struct net_device *dev;
3680 struct inet6_dev *idev; 3728 struct inet6_dev *idev;
3681 3729
3682 if (net != &init_net)
3683 return 0;
3684
3685 read_lock(&dev_base_lock); 3730 read_lock(&dev_base_lock);
3686 idx = 0; 3731 idx = 0;
3687 for_each_netdev(&init_net, dev) { 3732 for_each_netdev(net, dev) {
3688 if (idx < s_idx) 3733 if (idx < s_idx)
3689 goto cont; 3734 goto cont;
3690 if ((idev = in6_dev_get(dev)) == NULL) 3735 if ((idev = in6_dev_get(dev)) == NULL)
@@ -3706,6 +3751,7 @@ cont:
3706void inet6_ifinfo_notify(int event, struct inet6_dev *idev) 3751void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
3707{ 3752{
3708 struct sk_buff *skb; 3753 struct sk_buff *skb;
3754 struct net *net = dev_net(idev->dev);
3709 int err = -ENOBUFS; 3755 int err = -ENOBUFS;
3710 3756
3711 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC); 3757 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
@@ -3719,10 +3765,10 @@ void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
3719 kfree_skb(skb); 3765 kfree_skb(skb);
3720 goto errout; 3766 goto errout;
3721 } 3767 }
3722 err = rtnl_notify(skb, &init_net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); 3768 err = rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
3723errout: 3769errout:
3724 if (err < 0) 3770 if (err < 0)
3725 rtnl_set_sk_err(&init_net, RTNLGRP_IPV6_IFADDR, err); 3771 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
3726} 3772}
3727 3773
3728static inline size_t inet6_prefix_nlmsg_size(void) 3774static inline size_t inet6_prefix_nlmsg_size(void)
@@ -3775,6 +3821,7 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
3775 struct prefix_info *pinfo) 3821 struct prefix_info *pinfo)
3776{ 3822{
3777 struct sk_buff *skb; 3823 struct sk_buff *skb;
3824 struct net *net = dev_net(idev->dev);
3778 int err = -ENOBUFS; 3825 int err = -ENOBUFS;
3779 3826
3780 skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC); 3827 skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
@@ -3788,10 +3835,10 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
3788 kfree_skb(skb); 3835 kfree_skb(skb);
3789 goto errout; 3836 goto errout;
3790 } 3837 }
3791 err = rtnl_notify(skb, &init_net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC); 3838 err = rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
3792errout: 3839errout:
3793 if (err < 0) 3840 if (err < 0)
3794 rtnl_set_sk_err(&init_net, RTNLGRP_IPV6_PREFIX, err); 3841 rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
3795} 3842}
3796 3843
3797static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) 3844static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
@@ -3887,7 +3934,7 @@ static int addrconf_sysctl_forward_strategy(ctl_table *table,
3887static struct addrconf_sysctl_table 3934static struct addrconf_sysctl_table
3888{ 3935{
3889 struct ctl_table_header *sysctl_header; 3936 struct ctl_table_header *sysctl_header;
3890 ctl_table addrconf_vars[__NET_IPV6_MAX]; 3937 ctl_table addrconf_vars[DEVCONF_MAX+1];
3891 char *dev_name; 3938 char *dev_name;
3892} addrconf_sysctl __read_mostly = { 3939} addrconf_sysctl __read_mostly = {
3893 .sysctl_header = NULL, 3940 .sysctl_header = NULL,
@@ -4105,6 +4152,16 @@ static struct addrconf_sysctl_table
4105 4152
4106 }, 4153 },
4107#endif 4154#endif
4155#ifdef CONFIG_IPV6_MROUTE
4156 {
4157 .ctl_name = CTL_UNNUMBERED,
4158 .procname = "mc_forwarding",
4159 .data = &ipv6_devconf.mc_forwarding,
4160 .maxlen = sizeof(int),
4161 .mode = 0644,
4162 .proc_handler = &proc_dointvec,
4163 },
4164#endif
4108 { 4165 {
4109 .ctl_name = 0, /* sentinel */ 4166 .ctl_name = 0, /* sentinel */
4110 } 4167 }
@@ -4186,7 +4243,7 @@ static void addrconf_sysctl_register(struct inet6_dev *idev)
4186 NET_IPV6_NEIGH, "ipv6", 4243 NET_IPV6_NEIGH, "ipv6",
4187 &ndisc_ifinfo_sysctl_change, 4244 &ndisc_ifinfo_sysctl_change,
4188 NULL); 4245 NULL);
4189 __addrconf_sysctl_register(idev->dev->nd_net, idev->dev->name, 4246 __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
4190 idev->dev->ifindex, idev, &idev->cnf); 4247 idev->dev->ifindex, idev, &idev->cnf);
4191} 4248}
4192 4249
@@ -4281,6 +4338,32 @@ int unregister_inet6addr_notifier(struct notifier_block *nb)
4281 4338
4282EXPORT_SYMBOL(unregister_inet6addr_notifier); 4339EXPORT_SYMBOL(unregister_inet6addr_notifier);
4283 4340
4341
4342static int addrconf_net_init(struct net *net)
4343{
4344 return 0;
4345}
4346
4347static void addrconf_net_exit(struct net *net)
4348{
4349 struct net_device *dev;
4350
4351 rtnl_lock();
4352 /* clean dev list */
4353 for_each_netdev(net, dev) {
4354 if (__in6_dev_get(dev) == NULL)
4355 continue;
4356 addrconf_ifdown(dev, 1);
4357 }
4358 addrconf_ifdown(net->loopback_dev, 2);
4359 rtnl_unlock();
4360}
4361
4362static struct pernet_operations addrconf_net_ops = {
4363 .init = addrconf_net_init,
4364 .exit = addrconf_net_exit,
4365};
4366
4284/* 4367/*
4285 * Init / cleanup code 4368 * Init / cleanup code
4286 */ 4369 */
@@ -4322,14 +4405,9 @@ int __init addrconf_init(void)
4322 if (err) 4405 if (err)
4323 goto errlo; 4406 goto errlo;
4324 4407
4325 ip6_null_entry.u.dst.dev = init_net.loopback_dev; 4408 err = register_pernet_device(&addrconf_net_ops);
4326 ip6_null_entry.rt6i_idev = in6_dev_get(init_net.loopback_dev); 4409 if (err)
4327#ifdef CONFIG_IPV6_MULTIPLE_TABLES 4410 return err;
4328 ip6_prohibit_entry.u.dst.dev = init_net.loopback_dev;
4329 ip6_prohibit_entry.rt6i_idev = in6_dev_get(init_net.loopback_dev);
4330 ip6_blk_hole_entry.u.dst.dev = init_net.loopback_dev;
4331 ip6_blk_hole_entry.rt6i_idev = in6_dev_get(init_net.loopback_dev);
4332#endif
4333 4411
4334 register_netdevice_notifier(&ipv6_dev_notf); 4412 register_netdevice_notifier(&ipv6_dev_notf);
4335 4413
@@ -4359,31 +4437,19 @@ errlo:
4359 4437
4360void addrconf_cleanup(void) 4438void addrconf_cleanup(void)
4361{ 4439{
4362 struct net_device *dev;
4363 struct inet6_ifaddr *ifa; 4440 struct inet6_ifaddr *ifa;
4364 int i; 4441 int i;
4365 4442
4366 unregister_netdevice_notifier(&ipv6_dev_notf); 4443 unregister_netdevice_notifier(&ipv6_dev_notf);
4444 unregister_pernet_device(&addrconf_net_ops);
4367 4445
4368 unregister_pernet_subsys(&addrconf_ops); 4446 unregister_pernet_subsys(&addrconf_ops);
4369 4447
4370 rtnl_lock(); 4448 rtnl_lock();
4371 4449
4372 /* 4450 /*
4373 * clean dev list.
4374 */
4375
4376 for_each_netdev(&init_net, dev) {
4377 if (__in6_dev_get(dev) == NULL)
4378 continue;
4379 addrconf_ifdown(dev, 1);
4380 }
4381 addrconf_ifdown(init_net.loopback_dev, 2);
4382
4383 /*
4384 * Check hash table. 4451 * Check hash table.
4385 */ 4452 */
4386
4387 write_lock_bh(&addrconf_hash_lock); 4453 write_lock_bh(&addrconf_hash_lock);
4388 for (i=0; i < IN6_ADDR_HSIZE; i++) { 4454 for (i=0; i < IN6_ADDR_HSIZE; i++) {
4389 for (ifa=inet6_addr_lst[i]; ifa; ) { 4455 for (ifa=inet6_addr_lst[i]; ifa; ) {
@@ -4400,6 +4466,7 @@ void addrconf_cleanup(void)
4400 write_unlock_bh(&addrconf_hash_lock); 4466 write_unlock_bh(&addrconf_hash_lock);
4401 4467
4402 del_timer(&addr_chk_timer); 4468 del_timer(&addr_chk_timer);
4403
4404 rtnl_unlock(); 4469 rtnl_unlock();
4470
4471 unregister_pernet_subsys(&addrconf_net_ops);
4405} 4472}
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index a3c5a72218fd..9bfa8846f262 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -58,6 +58,7 @@ static struct ip6addrlbl_table
58 * ::ffff:0:0/96 V4MAPPED 4 58 * ::ffff:0:0/96 V4MAPPED 4
59 * fc00::/7 N/A 5 ULA (RFC 4193) 59 * fc00::/7 N/A 5 ULA (RFC 4193)
60 * 2001::/32 N/A 6 Teredo (RFC 4380) 60 * 2001::/32 N/A 6 Teredo (RFC 4380)
61 * 2001:10::/28 N/A 7 ORCHID (RFC 4843)
61 * 62 *
62 * Note: 0xffffffff is used if we do not have any policies. 63 * Note: 0xffffffff is used if we do not have any policies.
63 */ 64 */
@@ -85,6 +86,10 @@ static const __initdata struct ip6addrlbl_init_table
85 .prefix = &(struct in6_addr){{{ 0x20, 0x01 }}}, 86 .prefix = &(struct in6_addr){{{ 0x20, 0x01 }}},
86 .prefixlen = 32, 87 .prefixlen = 32,
87 .label = 6, 88 .label = 6,
89 },{ /* 2001:10::/28 */
90 .prefix = &(struct in6_addr){{{ 0x20, 0x01, 0x00, 0x10 }}},
91 .prefixlen = 28,
92 .label = 7,
88 },{ /* ::ffff:0:0 */ 93 },{ /* ::ffff:0:0 */
89 .prefix = &(struct in6_addr){{{ [10] = 0xff, [11] = 0xff }}}, 94 .prefix = &(struct in6_addr){{{ [10] = 0xff, [11] = 0xff }}},
90 .prefixlen = 96, 95 .prefixlen = 96,
@@ -161,7 +166,7 @@ u32 ipv6_addr_label(const struct in6_addr *addr, int type, int ifindex)
161 rcu_read_unlock(); 166 rcu_read_unlock();
162 167
163 ADDRLABEL(KERN_DEBUG "%s(addr=" NIP6_FMT ", type=%d, ifindex=%d) => %08x\n", 168 ADDRLABEL(KERN_DEBUG "%s(addr=" NIP6_FMT ", type=%d, ifindex=%d) => %08x\n",
164 __FUNCTION__, 169 __func__,
165 NIP6(*addr), type, ifindex, 170 NIP6(*addr), type, ifindex,
166 label); 171 label);
167 172
@@ -177,7 +182,7 @@ static struct ip6addrlbl_entry *ip6addrlbl_alloc(const struct in6_addr *prefix,
177 int addrtype; 182 int addrtype;
178 183
179 ADDRLABEL(KERN_DEBUG "%s(prefix=" NIP6_FMT ", prefixlen=%d, ifindex=%d, label=%u)\n", 184 ADDRLABEL(KERN_DEBUG "%s(prefix=" NIP6_FMT ", prefixlen=%d, ifindex=%d, label=%u)\n",
180 __FUNCTION__, 185 __func__,
181 NIP6(*prefix), prefixlen, 186 NIP6(*prefix), prefixlen,
182 ifindex, 187 ifindex,
183 (unsigned int)label); 188 (unsigned int)label);
@@ -221,7 +226,7 @@ static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace)
221 int ret = 0; 226 int ret = 0;
222 227
223 ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n", 228 ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n",
224 __FUNCTION__, 229 __func__,
225 newp, replace); 230 newp, replace);
226 231
227 if (hlist_empty(&ip6addrlbl_table.head)) { 232 if (hlist_empty(&ip6addrlbl_table.head)) {
@@ -263,7 +268,7 @@ static int ip6addrlbl_add(const struct in6_addr *prefix, int prefixlen,
263 int ret = 0; 268 int ret = 0;
264 269
265 ADDRLABEL(KERN_DEBUG "%s(prefix=" NIP6_FMT ", prefixlen=%d, ifindex=%d, label=%u, replace=%d)\n", 270 ADDRLABEL(KERN_DEBUG "%s(prefix=" NIP6_FMT ", prefixlen=%d, ifindex=%d, label=%u, replace=%d)\n",
266 __FUNCTION__, 271 __func__,
267 NIP6(*prefix), prefixlen, 272 NIP6(*prefix), prefixlen,
268 ifindex, 273 ifindex,
269 (unsigned int)label, 274 (unsigned int)label,
@@ -289,7 +294,7 @@ static int __ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen,
289 int ret = -ESRCH; 294 int ret = -ESRCH;
290 295
291 ADDRLABEL(KERN_DEBUG "%s(prefix=" NIP6_FMT ", prefixlen=%d, ifindex=%d)\n", 296 ADDRLABEL(KERN_DEBUG "%s(prefix=" NIP6_FMT ", prefixlen=%d, ifindex=%d)\n",
292 __FUNCTION__, 297 __func__,
293 NIP6(*prefix), prefixlen, 298 NIP6(*prefix), prefixlen,
294 ifindex); 299 ifindex);
295 300
@@ -313,7 +318,7 @@ static int ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen,
313 int ret; 318 int ret;
314 319
315 ADDRLABEL(KERN_DEBUG "%s(prefix=" NIP6_FMT ", prefixlen=%d, ifindex=%d)\n", 320 ADDRLABEL(KERN_DEBUG "%s(prefix=" NIP6_FMT ", prefixlen=%d, ifindex=%d)\n",
316 __FUNCTION__, 321 __func__,
317 NIP6(*prefix), prefixlen, 322 NIP6(*prefix), prefixlen,
318 ifindex); 323 ifindex);
319 324
@@ -330,7 +335,7 @@ static __init int ip6addrlbl_init(void)
330 int err = 0; 335 int err = 0;
331 int i; 336 int i;
332 337
333 ADDRLABEL(KERN_DEBUG "%s()\n", __FUNCTION__); 338 ADDRLABEL(KERN_DEBUG "%s()\n", __func__);
334 339
335 for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) { 340 for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) {
336 int ret = ip6addrlbl_add(ip6addrlbl_init_table[i].prefix, 341 int ret = ip6addrlbl_add(ip6addrlbl_init_table[i].prefix,
@@ -359,7 +364,7 @@ static const struct nla_policy ifal_policy[IFAL_MAX+1] = {
359static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh, 364static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh,
360 void *arg) 365 void *arg)
361{ 366{
362 struct net *net = skb->sk->sk_net; 367 struct net *net = sock_net(skb->sk);
363 struct ifaddrlblmsg *ifal; 368 struct ifaddrlblmsg *ifal;
364 struct nlattr *tb[IFAL_MAX+1]; 369 struct nlattr *tb[IFAL_MAX+1];
365 struct in6_addr *pfx; 370 struct in6_addr *pfx;
@@ -447,7 +452,7 @@ static int ip6addrlbl_fill(struct sk_buff *skb,
447 452
448static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb) 453static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
449{ 454{
450 struct net *net = skb->sk->sk_net; 455 struct net *net = sock_net(skb->sk);
451 struct ip6addrlbl_entry *p; 456 struct ip6addrlbl_entry *p;
452 struct hlist_node *pos; 457 struct hlist_node *pos;
453 int idx = 0, s_idx = cb->args[0]; 458 int idx = 0, s_idx = cb->args[0];
@@ -485,7 +490,7 @@ static inline int ip6addrlbl_msgsize(void)
485static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh, 490static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
486 void *arg) 491 void *arg)
487{ 492{
488 struct net *net = in_skb->sk->sk_net; 493 struct net *net = sock_net(in_skb->sk);
489 struct ifaddrlblmsg *ifal; 494 struct ifaddrlblmsg *ifal;
490 struct nlattr *tb[IFAL_MAX+1]; 495 struct nlattr *tb[IFAL_MAX+1];
491 struct in6_addr *addr; 496 struct in6_addr *addr;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index f0aa97738746..3c6aafb02183 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -61,6 +61,9 @@
61 61
62#include <asm/uaccess.h> 62#include <asm/uaccess.h>
63#include <asm/system.h> 63#include <asm/system.h>
64#ifdef CONFIG_IPV6_MROUTE
65#include <linux/mroute6.h>
66#endif
64 67
65MODULE_AUTHOR("Cast of dozens"); 68MODULE_AUTHOR("Cast of dozens");
66MODULE_DESCRIPTION("IPv6 protocol stack for Linux"); 69MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
@@ -92,9 +95,6 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol)
92 int try_loading_module = 0; 95 int try_loading_module = 0;
93 int err; 96 int err;
94 97
95 if (net != &init_net)
96 return -EAFNOSUPPORT;
97
98 if (sock->type != SOCK_RAW && 98 if (sock->type != SOCK_RAW &&
99 sock->type != SOCK_DGRAM && 99 sock->type != SOCK_DGRAM &&
100 !inet_ehash_secret) 100 !inet_ehash_secret)
@@ -248,6 +248,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
248 struct sock *sk = sock->sk; 248 struct sock *sk = sock->sk;
249 struct inet_sock *inet = inet_sk(sk); 249 struct inet_sock *inet = inet_sk(sk);
250 struct ipv6_pinfo *np = inet6_sk(sk); 250 struct ipv6_pinfo *np = inet6_sk(sk);
251 struct net *net = sock_net(sk);
251 __be32 v4addr = 0; 252 __be32 v4addr = 0;
252 unsigned short snum; 253 unsigned short snum;
253 int addr_type = 0; 254 int addr_type = 0;
@@ -278,7 +279,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
278 /* Check if the address belongs to the host. */ 279 /* Check if the address belongs to the host. */
279 if (addr_type == IPV6_ADDR_MAPPED) { 280 if (addr_type == IPV6_ADDR_MAPPED) {
280 v4addr = addr->sin6_addr.s6_addr32[3]; 281 v4addr = addr->sin6_addr.s6_addr32[3];
281 if (inet_addr_type(&init_net, v4addr) != RTN_LOCAL) { 282 if (inet_addr_type(net, v4addr) != RTN_LOCAL) {
282 err = -EADDRNOTAVAIL; 283 err = -EADDRNOTAVAIL;
283 goto out; 284 goto out;
284 } 285 }
@@ -300,7 +301,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
300 err = -EINVAL; 301 err = -EINVAL;
301 goto out; 302 goto out;
302 } 303 }
303 dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if); 304 dev = dev_get_by_index(net, sk->sk_bound_dev_if);
304 if (!dev) { 305 if (!dev) {
305 err = -ENODEV; 306 err = -ENODEV;
306 goto out; 307 goto out;
@@ -312,7 +313,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
312 */ 313 */
313 v4addr = LOOPBACK4_IPV6; 314 v4addr = LOOPBACK4_IPV6;
314 if (!(addr_type & IPV6_ADDR_MULTICAST)) { 315 if (!(addr_type & IPV6_ADDR_MULTICAST)) {
315 if (!ipv6_chk_addr(&init_net, &addr->sin6_addr, 316 if (!ipv6_chk_addr(net, &addr->sin6_addr,
316 dev, 0)) { 317 dev, 0)) {
317 if (dev) 318 if (dev)
318 dev_put(dev); 319 dev_put(dev);
@@ -440,6 +441,7 @@ EXPORT_SYMBOL(inet6_getname);
440int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 441int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
441{ 442{
442 struct sock *sk = sock->sk; 443 struct sock *sk = sock->sk;
444 struct net *net = sock_net(sk);
443 445
444 switch(cmd) 446 switch(cmd)
445 { 447 {
@@ -452,14 +454,14 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
452 case SIOCADDRT: 454 case SIOCADDRT:
453 case SIOCDELRT: 455 case SIOCDELRT:
454 456
455 return(ipv6_route_ioctl(cmd,(void __user *)arg)); 457 return(ipv6_route_ioctl(net, cmd, (void __user *)arg));
456 458
457 case SIOCSIFADDR: 459 case SIOCSIFADDR:
458 return addrconf_add_ifaddr((void __user *) arg); 460 return addrconf_add_ifaddr(net, (void __user *) arg);
459 case SIOCDIFADDR: 461 case SIOCDIFADDR:
460 return addrconf_del_ifaddr((void __user *) arg); 462 return addrconf_del_ifaddr(net, (void __user *) arg);
461 case SIOCSIFDSTADDR: 463 case SIOCSIFDSTADDR:
462 return addrconf_set_dstaddr((void __user *) arg); 464 return addrconf_set_dstaddr(net, (void __user *) arg);
463 default: 465 default:
464 if (!sk->sk_prot->ioctl) 466 if (!sk->sk_prot->ioctl)
465 return -ENOIOCTLCMD; 467 return -ENOIOCTLCMD;
@@ -678,6 +680,129 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
678 680
679EXPORT_SYMBOL_GPL(ipv6_opt_accepted); 681EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
680 682
683static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
684 int proto)
685{
686 struct inet6_protocol *ops = NULL;
687
688 for (;;) {
689 struct ipv6_opt_hdr *opth;
690 int len;
691
692 if (proto != NEXTHDR_HOP) {
693 ops = rcu_dereference(inet6_protos[proto]);
694
695 if (unlikely(!ops))
696 break;
697
698 if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
699 break;
700 }
701
702 if (unlikely(!pskb_may_pull(skb, 8)))
703 break;
704
705 opth = (void *)skb->data;
706 len = ipv6_optlen(opth);
707
708 if (unlikely(!pskb_may_pull(skb, len)))
709 break;
710
711 proto = opth->nexthdr;
712 __skb_pull(skb, len);
713 }
714
715 return ops;
716}
717
718static int ipv6_gso_send_check(struct sk_buff *skb)
719{
720 struct ipv6hdr *ipv6h;
721 struct inet6_protocol *ops;
722 int err = -EINVAL;
723
724 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
725 goto out;
726
727 ipv6h = ipv6_hdr(skb);
728 __skb_pull(skb, sizeof(*ipv6h));
729 err = -EPROTONOSUPPORT;
730
731 rcu_read_lock();
732 ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
733 if (likely(ops && ops->gso_send_check)) {
734 skb_reset_transport_header(skb);
735 err = ops->gso_send_check(skb);
736 }
737 rcu_read_unlock();
738
739out:
740 return err;
741}
742
743static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
744{
745 struct sk_buff *segs = ERR_PTR(-EINVAL);
746 struct ipv6hdr *ipv6h;
747 struct inet6_protocol *ops;
748
749 if (!(features & NETIF_F_V6_CSUM))
750 features &= ~NETIF_F_SG;
751
752 if (unlikely(skb_shinfo(skb)->gso_type &
753 ~(SKB_GSO_UDP |
754 SKB_GSO_DODGY |
755 SKB_GSO_TCP_ECN |
756 SKB_GSO_TCPV6 |
757 0)))
758 goto out;
759
760 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
761 goto out;
762
763 ipv6h = ipv6_hdr(skb);
764 __skb_pull(skb, sizeof(*ipv6h));
765 segs = ERR_PTR(-EPROTONOSUPPORT);
766
767 rcu_read_lock();
768 ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
769 if (likely(ops && ops->gso_segment)) {
770 skb_reset_transport_header(skb);
771 segs = ops->gso_segment(skb, features);
772 }
773 rcu_read_unlock();
774
775 if (unlikely(IS_ERR(segs)))
776 goto out;
777
778 for (skb = segs; skb; skb = skb->next) {
779 ipv6h = ipv6_hdr(skb);
780 ipv6h->payload_len = htons(skb->len - skb->mac_len -
781 sizeof(*ipv6h));
782 }
783
784out:
785 return segs;
786}
787
788static struct packet_type ipv6_packet_type = {
789 .type = __constant_htons(ETH_P_IPV6),
790 .func = ipv6_rcv,
791 .gso_send_check = ipv6_gso_send_check,
792 .gso_segment = ipv6_gso_segment,
793};
794
795static int __init ipv6_packet_init(void)
796{
797 dev_add_pack(&ipv6_packet_type);
798 return 0;
799}
800
801static void ipv6_packet_cleanup(void)
802{
803 dev_remove_pack(&ipv6_packet_type);
804}
805
681static int __init init_ipv6_mibs(void) 806static int __init init_ipv6_mibs(void)
682{ 807{
683 if (snmp_mib_init((void **)ipv6_statistics, 808 if (snmp_mib_init((void **)ipv6_statistics,
@@ -720,6 +845,8 @@ static void cleanup_ipv6_mibs(void)
720 845
721static int inet6_net_init(struct net *net) 846static int inet6_net_init(struct net *net)
722{ 847{
848 int err = 0;
849
723 net->ipv6.sysctl.bindv6only = 0; 850 net->ipv6.sysctl.bindv6only = 0;
724 net->ipv6.sysctl.flush_delay = 0; 851 net->ipv6.sysctl.flush_delay = 0;
725 net->ipv6.sysctl.ip6_rt_max_size = 4096; 852 net->ipv6.sysctl.ip6_rt_max_size = 4096;
@@ -731,12 +858,36 @@ static int inet6_net_init(struct net *net)
731 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40; 858 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
732 net->ipv6.sysctl.icmpv6_time = 1*HZ; 859 net->ipv6.sysctl.icmpv6_time = 1*HZ;
733 860
734 return 0; 861#ifdef CONFIG_PROC_FS
862 err = udp6_proc_init(net);
863 if (err)
864 goto out;
865 err = tcp6_proc_init(net);
866 if (err)
867 goto proc_tcp6_fail;
868 err = ac6_proc_init(net);
869 if (err)
870 goto proc_ac6_fail;
871out:
872#endif
873 return err;
874
875#ifdef CONFIG_PROC_FS
876proc_ac6_fail:
877 tcp6_proc_exit(net);
878proc_tcp6_fail:
879 udp6_proc_exit(net);
880 goto out;
881#endif
735} 882}
736 883
737static void inet6_net_exit(struct net *net) 884static void inet6_net_exit(struct net *net)
738{ 885{
739 return; 886#ifdef CONFIG_PROC_FS
887 udp6_proc_exit(net);
888 tcp6_proc_exit(net);
889 ac6_proc_exit(net);
890#endif
740} 891}
741 892
742static struct pernet_operations inet6_net_ops = { 893static struct pernet_operations inet6_net_ops = {
@@ -802,19 +953,16 @@ static int __init inet6_init(void)
802 err = register_pernet_subsys(&inet6_net_ops); 953 err = register_pernet_subsys(&inet6_net_ops);
803 if (err) 954 if (err)
804 goto register_pernet_fail; 955 goto register_pernet_fail;
805 956 err = icmpv6_init();
806#ifdef CONFIG_SYSCTL
807 err = ipv6_sysctl_register();
808 if (err)
809 goto sysctl_fail;
810#endif
811 err = icmpv6_init(&inet6_family_ops);
812 if (err) 957 if (err)
813 goto icmp_fail; 958 goto icmp_fail;
814 err = ndisc_init(&inet6_family_ops); 959#ifdef CONFIG_IPV6_MROUTE
960 ip6_mr_init();
961#endif
962 err = ndisc_init();
815 if (err) 963 if (err)
816 goto ndisc_fail; 964 goto ndisc_fail;
817 err = igmp6_init(&inet6_family_ops); 965 err = igmp6_init();
818 if (err) 966 if (err)
819 goto igmp_fail; 967 goto igmp_fail;
820 err = ipv6_netfilter_init(); 968 err = ipv6_netfilter_init();
@@ -825,17 +973,10 @@ static int __init inet6_init(void)
825 err = -ENOMEM; 973 err = -ENOMEM;
826 if (raw6_proc_init()) 974 if (raw6_proc_init())
827 goto proc_raw6_fail; 975 goto proc_raw6_fail;
828 if (tcp6_proc_init())
829 goto proc_tcp6_fail;
830 if (udp6_proc_init())
831 goto proc_udp6_fail;
832 if (udplite6_proc_init()) 976 if (udplite6_proc_init())
833 goto proc_udplite6_fail; 977 goto proc_udplite6_fail;
834 if (ipv6_misc_proc_init()) 978 if (ipv6_misc_proc_init())
835 goto proc_misc6_fail; 979 goto proc_misc6_fail;
836
837 if (ac6_proc_init())
838 goto proc_anycast6_fail;
839 if (if6_proc_init()) 980 if (if6_proc_init())
840 goto proc_if6_fail; 981 goto proc_if6_fail;
841#endif 982#endif
@@ -874,9 +1015,19 @@ static int __init inet6_init(void)
874 err = ipv6_packet_init(); 1015 err = ipv6_packet_init();
875 if (err) 1016 if (err)
876 goto ipv6_packet_fail; 1017 goto ipv6_packet_fail;
1018
1019#ifdef CONFIG_SYSCTL
1020 err = ipv6_sysctl_register();
1021 if (err)
1022 goto sysctl_fail;
1023#endif
877out: 1024out:
878 return err; 1025 return err;
879 1026
1027#ifdef CONFIG_SYSCTL
1028sysctl_fail:
1029 ipv6_packet_cleanup();
1030#endif
880ipv6_packet_fail: 1031ipv6_packet_fail:
881 tcpv6_exit(); 1032 tcpv6_exit();
882tcpv6_fail: 1033tcpv6_fail:
@@ -897,16 +1048,10 @@ ip6_route_fail:
897#ifdef CONFIG_PROC_FS 1048#ifdef CONFIG_PROC_FS
898 if6_proc_exit(); 1049 if6_proc_exit();
899proc_if6_fail: 1050proc_if6_fail:
900 ac6_proc_exit();
901proc_anycast6_fail:
902 ipv6_misc_proc_exit(); 1051 ipv6_misc_proc_exit();
903proc_misc6_fail: 1052proc_misc6_fail:
904 udplite6_proc_exit(); 1053 udplite6_proc_exit();
905proc_udplite6_fail: 1054proc_udplite6_fail:
906 udp6_proc_exit();
907proc_udp6_fail:
908 tcp6_proc_exit();
909proc_tcp6_fail:
910 raw6_proc_exit(); 1055 raw6_proc_exit();
911proc_raw6_fail: 1056proc_raw6_fail:
912#endif 1057#endif
@@ -918,10 +1063,6 @@ igmp_fail:
918ndisc_fail: 1063ndisc_fail:
919 icmpv6_cleanup(); 1064 icmpv6_cleanup();
920icmp_fail: 1065icmp_fail:
921#ifdef CONFIG_SYSCTL
922 ipv6_sysctl_unregister();
923sysctl_fail:
924#endif
925 unregister_pernet_subsys(&inet6_net_ops); 1066 unregister_pernet_subsys(&inet6_net_ops);
926register_pernet_fail: 1067register_pernet_fail:
927 cleanup_ipv6_mibs(); 1068 cleanup_ipv6_mibs();
@@ -949,6 +1090,9 @@ static void __exit inet6_exit(void)
949 /* Disallow any further netlink messages */ 1090 /* Disallow any further netlink messages */
950 rtnl_unregister_all(PF_INET6); 1091 rtnl_unregister_all(PF_INET6);
951 1092
1093#ifdef CONFIG_SYSCTL
1094 ipv6_sysctl_unregister();
1095#endif
952 udpv6_exit(); 1096 udpv6_exit();
953 udplitev6_exit(); 1097 udplitev6_exit();
954 tcpv6_exit(); 1098 tcpv6_exit();
@@ -964,11 +1108,8 @@ static void __exit inet6_exit(void)
964 1108
965 /* Cleanup code parts. */ 1109 /* Cleanup code parts. */
966 if6_proc_exit(); 1110 if6_proc_exit();
967 ac6_proc_exit();
968 ipv6_misc_proc_exit(); 1111 ipv6_misc_proc_exit();
969 udplite6_proc_exit(); 1112 udplite6_proc_exit();
970 udp6_proc_exit();
971 tcp6_proc_exit();
972 raw6_proc_exit(); 1113 raw6_proc_exit();
973#endif 1114#endif
974 ipv6_netfilter_fini(); 1115 ipv6_netfilter_fini();
@@ -976,9 +1117,7 @@ static void __exit inet6_exit(void)
976 ndisc_cleanup(); 1117 ndisc_cleanup();
977 icmpv6_cleanup(); 1118 icmpv6_cleanup();
978 rawv6_exit(); 1119 rawv6_exit();
979#ifdef CONFIG_SYSCTL 1120
980 ipv6_sysctl_unregister();
981#endif
982 unregister_pernet_subsys(&inet6_net_ops); 1121 unregister_pernet_subsys(&inet6_net_ops);
983 cleanup_ipv6_mibs(); 1122 cleanup_ipv6_mibs();
984 proto_unregister(&rawv6_prot); 1123 proto_unregister(&rawv6_prot);
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index e5f56c953b58..4e1b29fabdf0 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -48,29 +48,6 @@ static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr);
48/* Big ac list lock for all the sockets */ 48/* Big ac list lock for all the sockets */
49static DEFINE_RWLOCK(ipv6_sk_ac_lock); 49static DEFINE_RWLOCK(ipv6_sk_ac_lock);
50 50
51static int
52ip6_onlink(struct in6_addr *addr, struct net_device *dev)
53{
54 struct inet6_dev *idev;
55 struct inet6_ifaddr *ifa;
56 int onlink;
57
58 onlink = 0;
59 rcu_read_lock();
60 idev = __in6_dev_get(dev);
61 if (idev) {
62 read_lock_bh(&idev->lock);
63 for (ifa=idev->addr_list; ifa; ifa=ifa->if_next) {
64 onlink = ipv6_prefix_equal(addr, &ifa->addr,
65 ifa->prefix_len);
66 if (onlink)
67 break;
68 }
69 read_unlock_bh(&idev->lock);
70 }
71 rcu_read_unlock();
72 return onlink;
73}
74 51
75/* 52/*
76 * socket join an anycast group 53 * socket join an anycast group
@@ -82,6 +59,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr)
82 struct net_device *dev = NULL; 59 struct net_device *dev = NULL;
83 struct inet6_dev *idev; 60 struct inet6_dev *idev;
84 struct ipv6_ac_socklist *pac; 61 struct ipv6_ac_socklist *pac;
62 struct net *net = sock_net(sk);
85 int ishost = !ipv6_devconf.forwarding; 63 int ishost = !ipv6_devconf.forwarding;
86 int err = 0; 64 int err = 0;
87 65
@@ -89,7 +67,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr)
89 return -EPERM; 67 return -EPERM;
90 if (ipv6_addr_is_multicast(addr)) 68 if (ipv6_addr_is_multicast(addr))
91 return -EINVAL; 69 return -EINVAL;
92 if (ipv6_chk_addr(&init_net, addr, NULL, 0)) 70 if (ipv6_chk_addr(net, addr, NULL, 0))
93 return -EINVAL; 71 return -EINVAL;
94 72
95 pac = sock_kmalloc(sk, sizeof(struct ipv6_ac_socklist), GFP_KERNEL); 73 pac = sock_kmalloc(sk, sizeof(struct ipv6_ac_socklist), GFP_KERNEL);
@@ -101,7 +79,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr)
101 if (ifindex == 0) { 79 if (ifindex == 0) {
102 struct rt6_info *rt; 80 struct rt6_info *rt;
103 81
104 rt = rt6_lookup(addr, NULL, 0, 0); 82 rt = rt6_lookup(net, addr, NULL, 0, 0);
105 if (rt) { 83 if (rt) {
106 dev = rt->rt6i_dev; 84 dev = rt->rt6i_dev;
107 dev_hold(dev); 85 dev_hold(dev);
@@ -112,10 +90,10 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr)
112 } else { 90 } else {
113 /* router, no matching interface: just pick one */ 91 /* router, no matching interface: just pick one */
114 92
115 dev = dev_get_by_flags(&init_net, IFF_UP, IFF_UP|IFF_LOOPBACK); 93 dev = dev_get_by_flags(net, IFF_UP, IFF_UP|IFF_LOOPBACK);
116 } 94 }
117 } else 95 } else
118 dev = dev_get_by_index(&init_net, ifindex); 96 dev = dev_get_by_index(net, ifindex);
119 97
120 if (dev == NULL) { 98 if (dev == NULL) {
121 err = -ENODEV; 99 err = -ENODEV;
@@ -141,7 +119,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr)
141 * This obviates the need for propagating anycast routes while 119 * This obviates the need for propagating anycast routes while
142 * still allowing some non-router anycast participation. 120 * still allowing some non-router anycast participation.
143 */ 121 */
144 if (!ip6_onlink(addr, dev)) { 122 if (!ipv6_chk_prefix(addr, dev)) {
145 if (ishost) 123 if (ishost)
146 err = -EADDRNOTAVAIL; 124 err = -EADDRNOTAVAIL;
147 if (err) 125 if (err)
@@ -176,6 +154,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
176 struct ipv6_pinfo *np = inet6_sk(sk); 154 struct ipv6_pinfo *np = inet6_sk(sk);
177 struct net_device *dev; 155 struct net_device *dev;
178 struct ipv6_ac_socklist *pac, *prev_pac; 156 struct ipv6_ac_socklist *pac, *prev_pac;
157 struct net *net = sock_net(sk);
179 158
180 write_lock_bh(&ipv6_sk_ac_lock); 159 write_lock_bh(&ipv6_sk_ac_lock);
181 prev_pac = NULL; 160 prev_pac = NULL;
@@ -196,7 +175,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
196 175
197 write_unlock_bh(&ipv6_sk_ac_lock); 176 write_unlock_bh(&ipv6_sk_ac_lock);
198 177
199 dev = dev_get_by_index(&init_net, pac->acl_ifindex); 178 dev = dev_get_by_index(net, pac->acl_ifindex);
200 if (dev) { 179 if (dev) {
201 ipv6_dev_ac_dec(dev, &pac->acl_addr); 180 ipv6_dev_ac_dec(dev, &pac->acl_addr);
202 dev_put(dev); 181 dev_put(dev);
@@ -210,6 +189,7 @@ void ipv6_sock_ac_close(struct sock *sk)
210 struct ipv6_pinfo *np = inet6_sk(sk); 189 struct ipv6_pinfo *np = inet6_sk(sk);
211 struct net_device *dev = NULL; 190 struct net_device *dev = NULL;
212 struct ipv6_ac_socklist *pac; 191 struct ipv6_ac_socklist *pac;
192 struct net *net = sock_net(sk);
213 int prev_index; 193 int prev_index;
214 194
215 write_lock_bh(&ipv6_sk_ac_lock); 195 write_lock_bh(&ipv6_sk_ac_lock);
@@ -224,7 +204,7 @@ void ipv6_sock_ac_close(struct sock *sk)
224 if (pac->acl_ifindex != prev_index) { 204 if (pac->acl_ifindex != prev_index) {
225 if (dev) 205 if (dev)
226 dev_put(dev); 206 dev_put(dev);
227 dev = dev_get_by_index(&init_net, pac->acl_ifindex); 207 dev = dev_get_by_index(net, pac->acl_ifindex);
228 prev_index = pac->acl_ifindex; 208 prev_index = pac->acl_ifindex;
229 } 209 }
230 if (dev) 210 if (dev)
@@ -417,14 +397,15 @@ static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr)
417/* 397/*
418 * check if given interface (or any, if dev==0) has this anycast address 398 * check if given interface (or any, if dev==0) has this anycast address
419 */ 399 */
420int ipv6_chk_acast_addr(struct net_device *dev, struct in6_addr *addr) 400int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
401 struct in6_addr *addr)
421{ 402{
422 int found = 0; 403 int found = 0;
423 404
424 if (dev) 405 if (dev)
425 return ipv6_chk_acast_dev(dev, addr); 406 return ipv6_chk_acast_dev(dev, addr);
426 read_lock(&dev_base_lock); 407 read_lock(&dev_base_lock);
427 for_each_netdev(&init_net, dev) 408 for_each_netdev(net, dev)
428 if (ipv6_chk_acast_dev(dev, addr)) { 409 if (ipv6_chk_acast_dev(dev, addr)) {
429 found = 1; 410 found = 1;
430 break; 411 break;
@@ -436,6 +417,7 @@ int ipv6_chk_acast_addr(struct net_device *dev, struct in6_addr *addr)
436 417
437#ifdef CONFIG_PROC_FS 418#ifdef CONFIG_PROC_FS
438struct ac6_iter_state { 419struct ac6_iter_state {
420 struct seq_net_private p;
439 struct net_device *dev; 421 struct net_device *dev;
440 struct inet6_dev *idev; 422 struct inet6_dev *idev;
441}; 423};
@@ -446,9 +428,10 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq)
446{ 428{
447 struct ifacaddr6 *im = NULL; 429 struct ifacaddr6 *im = NULL;
448 struct ac6_iter_state *state = ac6_seq_private(seq); 430 struct ac6_iter_state *state = ac6_seq_private(seq);
431 struct net *net = seq_file_net(seq);
449 432
450 state->idev = NULL; 433 state->idev = NULL;
451 for_each_netdev(&init_net, state->dev) { 434 for_each_netdev(net, state->dev) {
452 struct inet6_dev *idev; 435 struct inet6_dev *idev;
453 idev = in6_dev_get(state->dev); 436 idev = in6_dev_get(state->dev);
454 if (!idev) 437 if (!idev)
@@ -546,8 +529,8 @@ static const struct seq_operations ac6_seq_ops = {
546 529
547static int ac6_seq_open(struct inode *inode, struct file *file) 530static int ac6_seq_open(struct inode *inode, struct file *file)
548{ 531{
549 return seq_open_private(file, &ac6_seq_ops, 532 return seq_open_net(inode, file, &ac6_seq_ops,
550 sizeof(struct ac6_iter_state)); 533 sizeof(struct ac6_iter_state));
551} 534}
552 535
553static const struct file_operations ac6_seq_fops = { 536static const struct file_operations ac6_seq_fops = {
@@ -555,20 +538,20 @@ static const struct file_operations ac6_seq_fops = {
555 .open = ac6_seq_open, 538 .open = ac6_seq_open,
556 .read = seq_read, 539 .read = seq_read,
557 .llseek = seq_lseek, 540 .llseek = seq_lseek,
558 .release = seq_release_private, 541 .release = seq_release_net,
559}; 542};
560 543
561int __init ac6_proc_init(void) 544int ac6_proc_init(struct net *net)
562{ 545{
563 if (!proc_net_fops_create(&init_net, "anycast6", S_IRUGO, &ac6_seq_fops)) 546 if (!proc_net_fops_create(net, "anycast6", S_IRUGO, &ac6_seq_fops))
564 return -ENOMEM; 547 return -ENOMEM;
565 548
566 return 0; 549 return 0;
567} 550}
568 551
569void ac6_proc_exit(void) 552void ac6_proc_exit(struct net *net)
570{ 553{
571 proc_net_remove(&init_net, "anycast6"); 554 proc_net_remove(net, "anycast6");
572} 555}
573#endif 556#endif
574 557
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 695c0ca8a417..8d05527524e3 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -29,24 +29,22 @@ struct fib6_rule
29 u8 tclass; 29 u8 tclass;
30}; 30};
31 31
32static struct fib_rules_ops fib6_rules_ops; 32struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl,
33 33 int flags, pol_lookup_t lookup)
34struct dst_entry *fib6_rule_lookup(struct flowi *fl, int flags,
35 pol_lookup_t lookup)
36{ 34{
37 struct fib_lookup_arg arg = { 35 struct fib_lookup_arg arg = {
38 .lookup_ptr = lookup, 36 .lookup_ptr = lookup,
39 }; 37 };
40 38
41 fib_rules_lookup(&fib6_rules_ops, fl, flags, &arg); 39 fib_rules_lookup(net->ipv6.fib6_rules_ops, fl, flags, &arg);
42 if (arg.rule) 40 if (arg.rule)
43 fib_rule_put(arg.rule); 41 fib_rule_put(arg.rule);
44 42
45 if (arg.result) 43 if (arg.result)
46 return arg.result; 44 return arg.result;
47 45
48 dst_hold(&ip6_null_entry.u.dst); 46 dst_hold(&net->ipv6.ip6_null_entry->u.dst);
49 return &ip6_null_entry.u.dst; 47 return &net->ipv6.ip6_null_entry->u.dst;
50} 48}
51 49
52static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, 50static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
@@ -54,28 +52,29 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
54{ 52{
55 struct rt6_info *rt = NULL; 53 struct rt6_info *rt = NULL;
56 struct fib6_table *table; 54 struct fib6_table *table;
55 struct net *net = rule->fr_net;
57 pol_lookup_t lookup = arg->lookup_ptr; 56 pol_lookup_t lookup = arg->lookup_ptr;
58 57
59 switch (rule->action) { 58 switch (rule->action) {
60 case FR_ACT_TO_TBL: 59 case FR_ACT_TO_TBL:
61 break; 60 break;
62 case FR_ACT_UNREACHABLE: 61 case FR_ACT_UNREACHABLE:
63 rt = &ip6_null_entry; 62 rt = net->ipv6.ip6_null_entry;
64 goto discard_pkt; 63 goto discard_pkt;
65 default: 64 default:
66 case FR_ACT_BLACKHOLE: 65 case FR_ACT_BLACKHOLE:
67 rt = &ip6_blk_hole_entry; 66 rt = net->ipv6.ip6_blk_hole_entry;
68 goto discard_pkt; 67 goto discard_pkt;
69 case FR_ACT_PROHIBIT: 68 case FR_ACT_PROHIBIT:
70 rt = &ip6_prohibit_entry; 69 rt = net->ipv6.ip6_prohibit_entry;
71 goto discard_pkt; 70 goto discard_pkt;
72 } 71 }
73 72
74 table = fib6_get_table(rule->table); 73 table = fib6_get_table(net, rule->table);
75 if (table) 74 if (table)
76 rt = lookup(table, flp, flags); 75 rt = lookup(net, table, flp, flags);
77 76
78 if (rt != &ip6_null_entry) { 77 if (rt != net->ipv6.ip6_null_entry) {
79 struct fib6_rule *r = (struct fib6_rule *)rule; 78 struct fib6_rule *r = (struct fib6_rule *)rule;
80 79
81 /* 80 /*
@@ -85,8 +84,18 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
85 if ((rule->flags & FIB_RULE_FIND_SADDR) && 84 if ((rule->flags & FIB_RULE_FIND_SADDR) &&
86 r->src.plen && !(flags & RT6_LOOKUP_F_HAS_SADDR)) { 85 r->src.plen && !(flags & RT6_LOOKUP_F_HAS_SADDR)) {
87 struct in6_addr saddr; 86 struct in6_addr saddr;
88 if (ipv6_get_saddr(&rt->u.dst, &flp->fl6_dst, 87 unsigned int srcprefs = 0;
89 &saddr)) 88
89 if (flags & RT6_LOOKUP_F_SRCPREF_TMP)
90 srcprefs |= IPV6_PREFER_SRC_TMP;
91 if (flags & RT6_LOOKUP_F_SRCPREF_PUBLIC)
92 srcprefs |= IPV6_PREFER_SRC_PUBLIC;
93 if (flags & RT6_LOOKUP_F_SRCPREF_COA)
94 srcprefs |= IPV6_PREFER_SRC_COA;
95
96 if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev,
97 &flp->fl6_dst, srcprefs,
98 &saddr))
90 goto again; 99 goto again;
91 if (!ipv6_prefix_equal(&saddr, &r->src.addr, 100 if (!ipv6_prefix_equal(&saddr, &r->src.addr,
92 r->src.plen)) 101 r->src.plen))
@@ -145,13 +154,14 @@ static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
145 struct nlattr **tb) 154 struct nlattr **tb)
146{ 155{
147 int err = -EINVAL; 156 int err = -EINVAL;
157 struct net *net = sock_net(skb->sk);
148 struct fib6_rule *rule6 = (struct fib6_rule *) rule; 158 struct fib6_rule *rule6 = (struct fib6_rule *) rule;
149 159
150 if (rule->action == FR_ACT_TO_TBL) { 160 if (rule->action == FR_ACT_TO_TBL) {
151 if (rule->table == RT6_TABLE_UNSPEC) 161 if (rule->table == RT6_TABLE_UNSPEC)
152 goto errout; 162 goto errout;
153 163
154 if (fib6_new_table(rule->table) == NULL) { 164 if (fib6_new_table(net, rule->table) == NULL) {
155 err = -ENOBUFS; 165 err = -ENOBUFS;
156 goto errout; 166 goto errout;
157 } 167 }
@@ -234,7 +244,7 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
234 + nla_total_size(16); /* src */ 244 + nla_total_size(16); /* src */
235} 245}
236 246
237static struct fib_rules_ops fib6_rules_ops = { 247static struct fib_rules_ops fib6_rules_ops_template = {
238 .family = AF_INET6, 248 .family = AF_INET6,
239 .rule_size = sizeof(struct fib6_rule), 249 .rule_size = sizeof(struct fib6_rule),
240 .addr_size = sizeof(struct in6_addr), 250 .addr_size = sizeof(struct in6_addr),
@@ -247,45 +257,64 @@ static struct fib_rules_ops fib6_rules_ops = {
247 .nlmsg_payload = fib6_rule_nlmsg_payload, 257 .nlmsg_payload = fib6_rule_nlmsg_payload,
248 .nlgroup = RTNLGRP_IPV6_RULE, 258 .nlgroup = RTNLGRP_IPV6_RULE,
249 .policy = fib6_rule_policy, 259 .policy = fib6_rule_policy,
250 .rules_list = LIST_HEAD_INIT(fib6_rules_ops.rules_list),
251 .owner = THIS_MODULE, 260 .owner = THIS_MODULE,
252 .fro_net = &init_net, 261 .fro_net = &init_net,
253}; 262};
254 263
255static int __init fib6_default_rules_init(void) 264static int fib6_rules_net_init(struct net *net)
256{ 265{
257 int err; 266 int err = -ENOMEM;
258 267
259 err = fib_default_rule_add(&fib6_rules_ops, 0, 268 net->ipv6.fib6_rules_ops = kmemdup(&fib6_rules_ops_template,
260 RT6_TABLE_LOCAL, FIB_RULE_PERMANENT); 269 sizeof(*net->ipv6.fib6_rules_ops),
261 if (err < 0) 270 GFP_KERNEL);
262 return err; 271 if (!net->ipv6.fib6_rules_ops)
263 err = fib_default_rule_add(&fib6_rules_ops, 0x7FFE, RT6_TABLE_MAIN, 0); 272 goto out;
264 if (err < 0)
265 return err;
266 return 0;
267}
268 273
269int __init fib6_rules_init(void) 274 net->ipv6.fib6_rules_ops->fro_net = net;
270{ 275 INIT_LIST_HEAD(&net->ipv6.fib6_rules_ops->rules_list);
271 int ret;
272 276
273 ret = fib6_default_rules_init(); 277 err = fib_default_rule_add(net->ipv6.fib6_rules_ops, 0,
274 if (ret) 278 RT6_TABLE_LOCAL, FIB_RULE_PERMANENT);
275 goto out; 279 if (err)
280 goto out_fib6_rules_ops;
276 281
277 ret = fib_rules_register(&fib6_rules_ops); 282 err = fib_default_rule_add(net->ipv6.fib6_rules_ops,
278 if (ret) 283 0x7FFE, RT6_TABLE_MAIN, 0);
279 goto out_default_rules_init; 284 if (err)
285 goto out_fib6_default_rule_add;
286
287 err = fib_rules_register(net->ipv6.fib6_rules_ops);
288 if (err)
289 goto out_fib6_default_rule_add;
280out: 290out:
281 return ret; 291 return err;
282 292
283out_default_rules_init: 293out_fib6_default_rule_add:
284 fib_rules_cleanup_ops(&fib6_rules_ops); 294 fib_rules_cleanup_ops(net->ipv6.fib6_rules_ops);
295out_fib6_rules_ops:
296 kfree(net->ipv6.fib6_rules_ops);
285 goto out; 297 goto out;
286} 298}
287 299
300static void fib6_rules_net_exit(struct net *net)
301{
302 fib_rules_unregister(net->ipv6.fib6_rules_ops);
303 kfree(net->ipv6.fib6_rules_ops);
304}
305
306static struct pernet_operations fib6_rules_net_ops = {
307 .init = fib6_rules_net_init,
308 .exit = fib6_rules_net_exit,
309};
310
311int __init fib6_rules_init(void)
312{
313 return register_pernet_subsys(&fib6_rules_net_ops);
314}
315
316
288void fib6_rules_cleanup(void) 317void fib6_rules_cleanup(void)
289{ 318{
290 fib_rules_unregister(&fib6_rules_ops); 319 unregister_pernet_subsys(&fib6_rules_net_ops);
291} 320}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 893287ecc628..d42dd16d3487 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -64,6 +64,7 @@
64#include <net/addrconf.h> 64#include <net/addrconf.h>
65#include <net/icmp.h> 65#include <net/icmp.h>
66#include <net/xfrm.h> 66#include <net/xfrm.h>
67#include <net/inet_common.h>
67 68
68#include <asm/uaccess.h> 69#include <asm/uaccess.h>
69#include <asm/system.h> 70#include <asm/system.h>
@@ -80,8 +81,10 @@ EXPORT_SYMBOL(icmpv6msg_statistics);
80 * 81 *
81 * On SMP we have one ICMP socket per-cpu. 82 * On SMP we have one ICMP socket per-cpu.
82 */ 83 */
83static DEFINE_PER_CPU(struct socket *, __icmpv6_socket) = NULL; 84static inline struct sock *icmpv6_sk(struct net *net)
84#define icmpv6_socket __get_cpu_var(__icmpv6_socket) 85{
86 return net->ipv6.icmp_sk[smp_processor_id()];
87}
85 88
86static int icmpv6_rcv(struct sk_buff *skb); 89static int icmpv6_rcv(struct sk_buff *skb);
87 90
@@ -90,11 +93,11 @@ static struct inet6_protocol icmpv6_protocol = {
90 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 93 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
91}; 94};
92 95
93static __inline__ int icmpv6_xmit_lock(void) 96static __inline__ int icmpv6_xmit_lock(struct sock *sk)
94{ 97{
95 local_bh_disable(); 98 local_bh_disable();
96 99
97 if (unlikely(!spin_trylock(&icmpv6_socket->sk->sk_lock.slock))) { 100 if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
98 /* This can happen if the output path (f.e. SIT or 101 /* This can happen if the output path (f.e. SIT or
99 * ip6ip6 tunnel) signals dst_link_failure() for an 102 * ip6ip6 tunnel) signals dst_link_failure() for an
100 * outgoing ICMP6 packet. 103 * outgoing ICMP6 packet.
@@ -105,9 +108,9 @@ static __inline__ int icmpv6_xmit_lock(void)
105 return 0; 108 return 0;
106} 109}
107 110
108static __inline__ void icmpv6_xmit_unlock(void) 111static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
109{ 112{
110 spin_unlock_bh(&icmpv6_socket->sk->sk_lock.slock); 113 spin_unlock_bh(&sk->sk_lock.slock);
111} 114}
112 115
113/* 116/*
@@ -161,6 +164,7 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
161 struct flowi *fl) 164 struct flowi *fl)
162{ 165{
163 struct dst_entry *dst; 166 struct dst_entry *dst;
167 struct net *net = sock_net(sk);
164 int res = 0; 168 int res = 0;
165 169
166 /* Informational messages are not limited. */ 170 /* Informational messages are not limited. */
@@ -176,7 +180,7 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
176 * XXX: perhaps the expire for routing entries cloned by 180 * XXX: perhaps the expire for routing entries cloned by
177 * this lookup should be more aggressive (not longer than timeout). 181 * this lookup should be more aggressive (not longer than timeout).
178 */ 182 */
179 dst = ip6_route_output(sk, fl); 183 dst = ip6_route_output(net, sk, fl);
180 if (dst->error) { 184 if (dst->error) {
181 IP6_INC_STATS(ip6_dst_idev(dst), 185 IP6_INC_STATS(ip6_dst_idev(dst),
182 IPSTATS_MIB_OUTNOROUTES); 186 IPSTATS_MIB_OUTNOROUTES);
@@ -184,7 +188,7 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
184 res = 1; 188 res = 1;
185 } else { 189 } else {
186 struct rt6_info *rt = (struct rt6_info *)dst; 190 struct rt6_info *rt = (struct rt6_info *)dst;
187 int tmo = init_net.ipv6.sysctl.icmpv6_time; 191 int tmo = net->ipv6.sysctl.icmpv6_time;
188 192
189 /* Give more bandwidth to wider prefixes. */ 193 /* Give more bandwidth to wider prefixes. */
190 if (rt->rt6i_dst.plen < 128) 194 if (rt->rt6i_dst.plen < 128)
@@ -303,6 +307,7 @@ static inline void mip6_addr_swap(struct sk_buff *skb) {}
303void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info, 307void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
304 struct net_device *dev) 308 struct net_device *dev)
305{ 309{
310 struct net *net = dev_net(skb->dev);
306 struct inet6_dev *idev = NULL; 311 struct inet6_dev *idev = NULL;
307 struct ipv6hdr *hdr = ipv6_hdr(skb); 312 struct ipv6hdr *hdr = ipv6_hdr(skb);
308 struct sock *sk; 313 struct sock *sk;
@@ -332,7 +337,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
332 */ 337 */
333 addr_type = ipv6_addr_type(&hdr->daddr); 338 addr_type = ipv6_addr_type(&hdr->daddr);
334 339
335 if (ipv6_chk_addr(&init_net, &hdr->daddr, skb->dev, 0)) 340 if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0))
336 saddr = &hdr->daddr; 341 saddr = &hdr->daddr;
337 342
338 /* 343 /*
@@ -389,12 +394,12 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
389 fl.fl_icmp_code = code; 394 fl.fl_icmp_code = code;
390 security_skb_classify_flow(skb, &fl); 395 security_skb_classify_flow(skb, &fl);
391 396
392 if (icmpv6_xmit_lock()) 397 sk = icmpv6_sk(net);
393 return;
394
395 sk = icmpv6_socket->sk;
396 np = inet6_sk(sk); 398 np = inet6_sk(sk);
397 399
400 if (icmpv6_xmit_lock(sk))
401 return;
402
398 if (!icmpv6_xrlim_allow(sk, type, &fl)) 403 if (!icmpv6_xrlim_allow(sk, type, &fl))
399 goto out; 404 goto out;
400 405
@@ -462,9 +467,7 @@ route_done:
462 else 467 else
463 hlimit = np->hop_limit; 468 hlimit = np->hop_limit;
464 if (hlimit < 0) 469 if (hlimit < 0)
465 hlimit = dst_metric(dst, RTAX_HOPLIMIT); 470 hlimit = ip6_dst_hoplimit(dst);
466 if (hlimit < 0)
467 hlimit = ipv6_get_hoplimit(dst->dev);
468 471
469 tclass = np->tclass; 472 tclass = np->tclass;
470 if (tclass < 0) 473 if (tclass < 0)
@@ -500,13 +503,14 @@ out_put:
500out_dst_release: 503out_dst_release:
501 dst_release(dst); 504 dst_release(dst);
502out: 505out:
503 icmpv6_xmit_unlock(); 506 icmpv6_xmit_unlock(sk);
504} 507}
505 508
506EXPORT_SYMBOL(icmpv6_send); 509EXPORT_SYMBOL(icmpv6_send);
507 510
508static void icmpv6_echo_reply(struct sk_buff *skb) 511static void icmpv6_echo_reply(struct sk_buff *skb)
509{ 512{
513 struct net *net = dev_net(skb->dev);
510 struct sock *sk; 514 struct sock *sk;
511 struct inet6_dev *idev; 515 struct inet6_dev *idev;
512 struct ipv6_pinfo *np; 516 struct ipv6_pinfo *np;
@@ -537,12 +541,12 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
537 fl.fl_icmp_type = ICMPV6_ECHO_REPLY; 541 fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
538 security_skb_classify_flow(skb, &fl); 542 security_skb_classify_flow(skb, &fl);
539 543
540 if (icmpv6_xmit_lock()) 544 sk = icmpv6_sk(net);
541 return;
542
543 sk = icmpv6_socket->sk;
544 np = inet6_sk(sk); 545 np = inet6_sk(sk);
545 546
547 if (icmpv6_xmit_lock(sk))
548 return;
549
546 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) 550 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
547 fl.oif = np->mcast_oif; 551 fl.oif = np->mcast_oif;
548 552
@@ -557,9 +561,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
557 else 561 else
558 hlimit = np->hop_limit; 562 hlimit = np->hop_limit;
559 if (hlimit < 0) 563 if (hlimit < 0)
560 hlimit = dst_metric(dst, RTAX_HOPLIMIT); 564 hlimit = ip6_dst_hoplimit(dst);
561 if (hlimit < 0)
562 hlimit = ipv6_get_hoplimit(dst->dev);
563 565
564 tclass = np->tclass; 566 tclass = np->tclass;
565 if (tclass < 0) 567 if (tclass < 0)
@@ -586,7 +588,7 @@ out_put:
586 in6_dev_put(idev); 588 in6_dev_put(idev);
587 dst_release(dst); 589 dst_release(dst);
588out: 590out:
589 icmpv6_xmit_unlock(); 591 icmpv6_xmit_unlock(sk);
590} 592}
591 593
592static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info) 594static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
@@ -777,19 +779,40 @@ drop_no_count:
777 return 0; 779 return 0;
778} 780}
779 781
782void icmpv6_flow_init(struct sock *sk, struct flowi *fl,
783 u8 type,
784 const struct in6_addr *saddr,
785 const struct in6_addr *daddr,
786 int oif)
787{
788 memset(fl, 0, sizeof(*fl));
789 ipv6_addr_copy(&fl->fl6_src, saddr);
790 ipv6_addr_copy(&fl->fl6_dst, daddr);
791 fl->proto = IPPROTO_ICMPV6;
792 fl->fl_icmp_type = type;
793 fl->fl_icmp_code = 0;
794 fl->oif = oif;
795 security_sk_classify_flow(sk, fl);
796}
797
780/* 798/*
781 * Special lock-class for __icmpv6_socket: 799 * Special lock-class for __icmpv6_sk:
782 */ 800 */
783static struct lock_class_key icmpv6_socket_sk_dst_lock_key; 801static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
784 802
785int __init icmpv6_init(struct net_proto_family *ops) 803static int __net_init icmpv6_sk_init(struct net *net)
786{ 804{
787 struct sock *sk; 805 struct sock *sk;
788 int err, i, j; 806 int err, i, j;
789 807
808 net->ipv6.icmp_sk =
809 kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
810 if (net->ipv6.icmp_sk == NULL)
811 return -ENOMEM;
812
790 for_each_possible_cpu(i) { 813 for_each_possible_cpu(i) {
791 err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, 814 err = inet_ctl_sock_create(&sk, PF_INET6,
792 &per_cpu(__icmpv6_socket, i)); 815 SOCK_RAW, IPPROTO_ICMPV6, net);
793 if (err < 0) { 816 if (err < 0) {
794 printk(KERN_ERR 817 printk(KERN_ERR
795 "Failed to initialize the ICMP6 control socket " 818 "Failed to initialize the ICMP6 control socket "
@@ -798,12 +821,12 @@ int __init icmpv6_init(struct net_proto_family *ops)
798 goto fail; 821 goto fail;
799 } 822 }
800 823
801 sk = per_cpu(__icmpv6_socket, i)->sk; 824 net->ipv6.icmp_sk[i] = sk;
802 sk->sk_allocation = GFP_ATOMIC; 825
803 /* 826 /*
804 * Split off their lock-class, because sk->sk_dst_lock 827 * Split off their lock-class, because sk->sk_dst_lock
805 * gets used from softirqs, which is safe for 828 * gets used from softirqs, which is safe for
806 * __icmpv6_socket (because those never get directly used 829 * __icmpv6_sk (because those never get directly used
807 * via userspace syscalls), but unsafe for normal sockets. 830 * via userspace syscalls), but unsafe for normal sockets.
808 */ 831 */
809 lockdep_set_class(&sk->sk_dst_lock, 832 lockdep_set_class(&sk->sk_dst_lock,
@@ -814,39 +837,57 @@ int __init icmpv6_init(struct net_proto_family *ops)
814 */ 837 */
815 sk->sk_sndbuf = 838 sk->sk_sndbuf =
816 (2 * ((64 * 1024) + sizeof(struct sk_buff))); 839 (2 * ((64 * 1024) + sizeof(struct sk_buff)));
817
818 sk->sk_prot->unhash(sk);
819 } 840 }
820
821
822 if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0) {
823 printk(KERN_ERR "Failed to register ICMP6 protocol\n");
824 err = -EAGAIN;
825 goto fail;
826 }
827
828 return 0; 841 return 0;
829 842
830 fail: 843 fail:
831 for (j = 0; j < i; j++) { 844 for (j = 0; j < i; j++)
832 if (!cpu_possible(j)) 845 inet_ctl_sock_destroy(net->ipv6.icmp_sk[j]);
833 continue; 846 kfree(net->ipv6.icmp_sk);
834 sock_release(per_cpu(__icmpv6_socket, j));
835 }
836
837 return err; 847 return err;
838} 848}
839 849
840void icmpv6_cleanup(void) 850static void __net_exit icmpv6_sk_exit(struct net *net)
841{ 851{
842 int i; 852 int i;
843 853
844 for_each_possible_cpu(i) { 854 for_each_possible_cpu(i) {
845 sock_release(per_cpu(__icmpv6_socket, i)); 855 inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]);
846 } 856 }
857 kfree(net->ipv6.icmp_sk);
858}
859
860static struct pernet_operations icmpv6_sk_ops = {
861 .init = icmpv6_sk_init,
862 .exit = icmpv6_sk_exit,
863};
864
865int __init icmpv6_init(void)
866{
867 int err;
868
869 err = register_pernet_subsys(&icmpv6_sk_ops);
870 if (err < 0)
871 return err;
872
873 err = -EAGAIN;
874 if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0)
875 goto fail;
876 return 0;
877
878fail:
879 printk(KERN_ERR "Failed to register ICMP6 protocol\n");
880 unregister_pernet_subsys(&icmpv6_sk_ops);
881 return err;
882}
883
884void icmpv6_cleanup(void)
885{
886 unregister_pernet_subsys(&icmpv6_sk_ops);
847 inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); 887 inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
848} 888}
849 889
890
850static const struct icmp6_err { 891static const struct icmp6_err {
851 int err; 892 int err;
852 int fatal; 893 int fatal;
@@ -927,6 +968,10 @@ struct ctl_table *ipv6_icmp_sysctl_init(struct net *net)
927 table = kmemdup(ipv6_icmp_table_template, 968 table = kmemdup(ipv6_icmp_table_template,
928 sizeof(ipv6_icmp_table_template), 969 sizeof(ipv6_icmp_table_template),
929 GFP_KERNEL); 970 GFP_KERNEL);
971
972 if (table)
973 table[0].data = &net->ipv6.sysctl.icmpv6_time;
974
930 return table; 975 return table;
931} 976}
932#endif 977#endif
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 78de42ada844..87801cc1b2f8 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -33,6 +33,10 @@ int inet6_csk_bind_conflict(const struct sock *sk,
33 const struct hlist_node *node; 33 const struct hlist_node *node;
34 34
35 /* We must walk the whole port owner list in this case. -DaveM */ 35 /* We must walk the whole port owner list in this case. -DaveM */
36 /*
37 * See comment in inet_csk_bind_conflict about sock lookup
38 * vs net namespaces issues.
39 */
36 sk_for_each_bound(sk2, node, &tb->owners) { 40 sk_for_each_bound(sk2, node, &tb->owners) {
37 if (sk != sk2 && 41 if (sk != sk2 &&
38 (!sk->sk_bound_dev_if || 42 (!sk->sk_bound_dev_if ||
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 99fd25f7f005..580014aea4d6 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -24,7 +24,7 @@
24 24
25void __inet6_hash(struct sock *sk) 25void __inet6_hash(struct sock *sk)
26{ 26{
27 struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo; 27 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
28 struct hlist_head *list; 28 struct hlist_head *list;
29 rwlock_t *lock; 29 rwlock_t *lock;
30 30
@@ -43,7 +43,7 @@ void __inet6_hash(struct sock *sk)
43 } 43 }
44 44
45 __sk_add_node(sk, list); 45 __sk_add_node(sk, list);
46 sock_prot_inuse_add(sk->sk_prot, 1); 46 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
47 write_unlock(lock); 47 write_unlock(lock);
48} 48}
49EXPORT_SYMBOL(__inet6_hash); 49EXPORT_SYMBOL(__inet6_hash);
@@ -105,7 +105,7 @@ struct sock *inet6_lookup_listener(struct net *net,
105 105
106 read_lock(&hashinfo->lhash_lock); 106 read_lock(&hashinfo->lhash_lock);
107 sk_for_each(sk, node, &hashinfo->listening_hash[inet_lhashfn(hnum)]) { 107 sk_for_each(sk, node, &hashinfo->listening_hash[inet_lhashfn(hnum)]) {
108 if (sk->sk_net == net && inet_sk(sk)->num == hnum && 108 if (net_eq(sock_net(sk), net) && inet_sk(sk)->num == hnum &&
109 sk->sk_family == PF_INET6) { 109 sk->sk_family == PF_INET6) {
110 const struct ipv6_pinfo *np = inet6_sk(sk); 110 const struct ipv6_pinfo *np = inet6_sk(sk);
111 111
@@ -172,7 +172,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
172 struct sock *sk2; 172 struct sock *sk2;
173 const struct hlist_node *node; 173 const struct hlist_node *node;
174 struct inet_timewait_sock *tw; 174 struct inet_timewait_sock *tw;
175 struct net *net = sk->sk_net; 175 struct net *net = sock_net(sk);
176 176
177 prefetch(head->chain.first); 177 prefetch(head->chain.first);
178 write_lock(lock); 178 write_lock(lock);
@@ -204,7 +204,7 @@ unique:
204 BUG_TRAP(sk_unhashed(sk)); 204 BUG_TRAP(sk_unhashed(sk));
205 __sk_add_node(sk, &head->chain); 205 __sk_add_node(sk, &head->chain);
206 sk->sk_hash = hash; 206 sk->sk_hash = hash;
207 sock_prot_inuse_add(sk->sk_prot, 1); 207 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
208 write_unlock(lock); 208 write_unlock(lock);
209 209
210 if (twp != NULL) { 210 if (twp != NULL) {
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index bab72b6f1444..50f3f8f8a59b 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -48,8 +48,6 @@
48#define RT6_TRACE(x...) do { ; } while (0) 48#define RT6_TRACE(x...) do { ; } while (0)
49#endif 49#endif
50 50
51struct rt6_statistics rt6_stats;
52
53static struct kmem_cache * fib6_node_kmem __read_mostly; 51static struct kmem_cache * fib6_node_kmem __read_mostly;
54 52
55enum fib_walk_state_t 53enum fib_walk_state_t
@@ -66,6 +64,7 @@ enum fib_walk_state_t
66struct fib6_cleaner_t 64struct fib6_cleaner_t
67{ 65{
68 struct fib6_walker_t w; 66 struct fib6_walker_t w;
67 struct net *net;
69 int (*func)(struct rt6_info *, void *arg); 68 int (*func)(struct rt6_info *, void *arg);
70 void *arg; 69 void *arg;
71}; 70};
@@ -78,9 +77,10 @@ static DEFINE_RWLOCK(fib6_walker_lock);
78#define FWS_INIT FWS_L 77#define FWS_INIT FWS_L
79#endif 78#endif
80 79
81static void fib6_prune_clones(struct fib6_node *fn, struct rt6_info *rt); 80static void fib6_prune_clones(struct net *net, struct fib6_node *fn,
82static struct rt6_info * fib6_find_prefix(struct fib6_node *fn); 81 struct rt6_info *rt);
83static struct fib6_node * fib6_repair_tree(struct fib6_node *fn); 82static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn);
83static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_node *fn);
84static int fib6_walk(struct fib6_walker_t *w); 84static int fib6_walk(struct fib6_walker_t *w);
85static int fib6_walk_continue(struct fib6_walker_t *w); 85static int fib6_walk_continue(struct fib6_walker_t *w);
86 86
@@ -93,7 +93,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w);
93 93
94static __u32 rt_sernum; 94static __u32 rt_sernum;
95 95
96static DEFINE_TIMER(ip6_fib_timer, fib6_run_gc, 0, 0); 96static void fib6_gc_timer_cb(unsigned long arg);
97 97
98static struct fib6_walker_t fib6_walker_list = { 98static struct fib6_walker_t fib6_walker_list = {
99 .prev = &fib6_walker_list, 99 .prev = &fib6_walker_list,
@@ -166,22 +166,13 @@ static __inline__ void rt6_release(struct rt6_info *rt)
166 dst_free(&rt->u.dst); 166 dst_free(&rt->u.dst);
167} 167}
168 168
169static struct fib6_table fib6_main_tbl = {
170 .tb6_id = RT6_TABLE_MAIN,
171 .tb6_root = {
172 .leaf = &ip6_null_entry,
173 .fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO,
174 },
175};
176
177#ifdef CONFIG_IPV6_MULTIPLE_TABLES 169#ifdef CONFIG_IPV6_MULTIPLE_TABLES
178#define FIB_TABLE_HASHSZ 256 170#define FIB_TABLE_HASHSZ 256
179#else 171#else
180#define FIB_TABLE_HASHSZ 1 172#define FIB_TABLE_HASHSZ 1
181#endif 173#endif
182static struct hlist_head fib_table_hash[FIB_TABLE_HASHSZ];
183 174
184static void fib6_link_table(struct fib6_table *tb) 175static void fib6_link_table(struct net *net, struct fib6_table *tb)
185{ 176{
186 unsigned int h; 177 unsigned int h;
187 178
@@ -197,52 +188,46 @@ static void fib6_link_table(struct fib6_table *tb)
197 * No protection necessary, this is the only list mutatation 188 * No protection necessary, this is the only list mutatation
198 * operation, tables never disappear once they exist. 189 * operation, tables never disappear once they exist.
199 */ 190 */
200 hlist_add_head_rcu(&tb->tb6_hlist, &fib_table_hash[h]); 191 hlist_add_head_rcu(&tb->tb6_hlist, &net->ipv6.fib_table_hash[h]);
201} 192}
202 193
203#ifdef CONFIG_IPV6_MULTIPLE_TABLES 194#ifdef CONFIG_IPV6_MULTIPLE_TABLES
204static struct fib6_table fib6_local_tbl = {
205 .tb6_id = RT6_TABLE_LOCAL,
206 .tb6_root = {
207 .leaf = &ip6_null_entry,
208 .fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO,
209 },
210};
211 195
212static struct fib6_table *fib6_alloc_table(u32 id) 196static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
213{ 197{
214 struct fib6_table *table; 198 struct fib6_table *table;
215 199
216 table = kzalloc(sizeof(*table), GFP_ATOMIC); 200 table = kzalloc(sizeof(*table), GFP_ATOMIC);
217 if (table != NULL) { 201 if (table != NULL) {
218 table->tb6_id = id; 202 table->tb6_id = id;
219 table->tb6_root.leaf = &ip6_null_entry; 203 table->tb6_root.leaf = net->ipv6.ip6_null_entry;
220 table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; 204 table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
221 } 205 }
222 206
223 return table; 207 return table;
224} 208}
225 209
226struct fib6_table *fib6_new_table(u32 id) 210struct fib6_table *fib6_new_table(struct net *net, u32 id)
227{ 211{
228 struct fib6_table *tb; 212 struct fib6_table *tb;
229 213
230 if (id == 0) 214 if (id == 0)
231 id = RT6_TABLE_MAIN; 215 id = RT6_TABLE_MAIN;
232 tb = fib6_get_table(id); 216 tb = fib6_get_table(net, id);
233 if (tb) 217 if (tb)
234 return tb; 218 return tb;
235 219
236 tb = fib6_alloc_table(id); 220 tb = fib6_alloc_table(net, id);
237 if (tb != NULL) 221 if (tb != NULL)
238 fib6_link_table(tb); 222 fib6_link_table(net, tb);
239 223
240 return tb; 224 return tb;
241} 225}
242 226
243struct fib6_table *fib6_get_table(u32 id) 227struct fib6_table *fib6_get_table(struct net *net, u32 id)
244{ 228{
245 struct fib6_table *tb; 229 struct fib6_table *tb;
230 struct hlist_head *head;
246 struct hlist_node *node; 231 struct hlist_node *node;
247 unsigned int h; 232 unsigned int h;
248 233
@@ -250,7 +235,8 @@ struct fib6_table *fib6_get_table(u32 id)
250 id = RT6_TABLE_MAIN; 235 id = RT6_TABLE_MAIN;
251 h = id & (FIB_TABLE_HASHSZ - 1); 236 h = id & (FIB_TABLE_HASHSZ - 1);
252 rcu_read_lock(); 237 rcu_read_lock();
253 hlist_for_each_entry_rcu(tb, node, &fib_table_hash[h], tb6_hlist) { 238 head = &net->ipv6.fib_table_hash[h];
239 hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) {
254 if (tb->tb6_id == id) { 240 if (tb->tb6_id == id) {
255 rcu_read_unlock(); 241 rcu_read_unlock();
256 return tb; 242 return tb;
@@ -261,33 +247,32 @@ struct fib6_table *fib6_get_table(u32 id)
261 return NULL; 247 return NULL;
262} 248}
263 249
264static void __init fib6_tables_init(void) 250static void fib6_tables_init(struct net *net)
265{ 251{
266 fib6_link_table(&fib6_main_tbl); 252 fib6_link_table(net, net->ipv6.fib6_main_tbl);
267 fib6_link_table(&fib6_local_tbl); 253 fib6_link_table(net, net->ipv6.fib6_local_tbl);
268} 254}
269
270#else 255#else
271 256
272struct fib6_table *fib6_new_table(u32 id) 257struct fib6_table *fib6_new_table(struct net *net, u32 id)
273{ 258{
274 return fib6_get_table(id); 259 return fib6_get_table(net, id);
275} 260}
276 261
277struct fib6_table *fib6_get_table(u32 id) 262struct fib6_table *fib6_get_table(struct net *net, u32 id)
278{ 263{
279 return &fib6_main_tbl; 264 return net->ipv6.fib6_main_tbl;
280} 265}
281 266
282struct dst_entry *fib6_rule_lookup(struct flowi *fl, int flags, 267struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl,
283 pol_lookup_t lookup) 268 int flags, pol_lookup_t lookup)
284{ 269{
285 return (struct dst_entry *) lookup(&fib6_main_tbl, fl, flags); 270 return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl, flags);
286} 271}
287 272
288static void __init fib6_tables_init(void) 273static void fib6_tables_init(struct net *net)
289{ 274{
290 fib6_link_table(&fib6_main_tbl); 275 fib6_link_table(net, net->ipv6.fib6_main_tbl);
291} 276}
292 277
293#endif 278#endif
@@ -361,18 +346,16 @@ end:
361 346
362static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) 347static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
363{ 348{
364 struct net *net = skb->sk->sk_net; 349 struct net *net = sock_net(skb->sk);
365 unsigned int h, s_h; 350 unsigned int h, s_h;
366 unsigned int e = 0, s_e; 351 unsigned int e = 0, s_e;
367 struct rt6_rtnl_dump_arg arg; 352 struct rt6_rtnl_dump_arg arg;
368 struct fib6_walker_t *w; 353 struct fib6_walker_t *w;
369 struct fib6_table *tb; 354 struct fib6_table *tb;
370 struct hlist_node *node; 355 struct hlist_node *node;
356 struct hlist_head *head;
371 int res = 0; 357 int res = 0;
372 358
373 if (net != &init_net)
374 return 0;
375
376 s_h = cb->args[0]; 359 s_h = cb->args[0];
377 s_e = cb->args[1]; 360 s_e = cb->args[1];
378 361
@@ -401,7 +384,8 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
401 384
402 for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) { 385 for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
403 e = 0; 386 e = 0;
404 hlist_for_each_entry(tb, node, &fib_table_hash[h], tb6_hlist) { 387 head = &net->ipv6.fib_table_hash[h];
388 hlist_for_each_entry(tb, node, head, tb6_hlist) {
405 if (e < s_e) 389 if (e < s_e)
406 goto next; 390 goto next;
407 res = fib6_dump_table(tb, skb, cb); 391 res = fib6_dump_table(tb, skb, cb);
@@ -667,29 +651,29 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
667 rt->rt6i_node = fn; 651 rt->rt6i_node = fn;
668 atomic_inc(&rt->rt6i_ref); 652 atomic_inc(&rt->rt6i_ref);
669 inet6_rt_notify(RTM_NEWROUTE, rt, info); 653 inet6_rt_notify(RTM_NEWROUTE, rt, info);
670 rt6_stats.fib_rt_entries++; 654 info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
671 655
672 if ((fn->fn_flags & RTN_RTINFO) == 0) { 656 if ((fn->fn_flags & RTN_RTINFO) == 0) {
673 rt6_stats.fib_route_nodes++; 657 info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
674 fn->fn_flags |= RTN_RTINFO; 658 fn->fn_flags |= RTN_RTINFO;
675 } 659 }
676 660
677 return 0; 661 return 0;
678} 662}
679 663
680static __inline__ void fib6_start_gc(struct rt6_info *rt) 664static __inline__ void fib6_start_gc(struct net *net, struct rt6_info *rt)
681{ 665{
682 if (ip6_fib_timer.expires == 0 && 666 if (net->ipv6.ip6_fib_timer->expires == 0 &&
683 (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE))) 667 (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE)))
684 mod_timer(&ip6_fib_timer, jiffies + 668 mod_timer(net->ipv6.ip6_fib_timer, jiffies +
685 init_net.ipv6.sysctl.ip6_rt_gc_interval); 669 net->ipv6.sysctl.ip6_rt_gc_interval);
686} 670}
687 671
688void fib6_force_start_gc(void) 672void fib6_force_start_gc(struct net *net)
689{ 673{
690 if (ip6_fib_timer.expires == 0) 674 if (net->ipv6.ip6_fib_timer->expires == 0)
691 mod_timer(&ip6_fib_timer, jiffies + 675 mod_timer(net->ipv6.ip6_fib_timer, jiffies +
692 init_net.ipv6.sysctl.ip6_rt_gc_interval); 676 net->ipv6.sysctl.ip6_rt_gc_interval);
693} 677}
694 678
695/* 679/*
@@ -733,8 +717,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
733 if (sfn == NULL) 717 if (sfn == NULL)
734 goto st_failure; 718 goto st_failure;
735 719
736 sfn->leaf = &ip6_null_entry; 720 sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
737 atomic_inc(&ip6_null_entry.rt6i_ref); 721 atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref);
738 sfn->fn_flags = RTN_ROOT; 722 sfn->fn_flags = RTN_ROOT;
739 sfn->fn_sernum = fib6_new_sernum(); 723 sfn->fn_sernum = fib6_new_sernum();
740 724
@@ -776,9 +760,9 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
776 err = fib6_add_rt2node(fn, rt, info); 760 err = fib6_add_rt2node(fn, rt, info);
777 761
778 if (err == 0) { 762 if (err == 0) {
779 fib6_start_gc(rt); 763 fib6_start_gc(info->nl_net, rt);
780 if (!(rt->rt6i_flags&RTF_CACHE)) 764 if (!(rt->rt6i_flags&RTF_CACHE))
781 fib6_prune_clones(pn, rt); 765 fib6_prune_clones(info->nl_net, pn, rt);
782 } 766 }
783 767
784out: 768out:
@@ -788,12 +772,16 @@ out:
788 * If fib6_add_1 has cleared the old leaf pointer in the 772 * If fib6_add_1 has cleared the old leaf pointer in the
789 * super-tree leaf node we have to find a new one for it. 773 * super-tree leaf node we have to find a new one for it.
790 */ 774 */
775 if (pn != fn && pn->leaf == rt) {
776 pn->leaf = NULL;
777 atomic_dec(&rt->rt6i_ref);
778 }
791 if (pn != fn && !pn->leaf && !(pn->fn_flags & RTN_RTINFO)) { 779 if (pn != fn && !pn->leaf && !(pn->fn_flags & RTN_RTINFO)) {
792 pn->leaf = fib6_find_prefix(pn); 780 pn->leaf = fib6_find_prefix(info->nl_net, pn);
793#if RT6_DEBUG >= 2 781#if RT6_DEBUG >= 2
794 if (!pn->leaf) { 782 if (!pn->leaf) {
795 BUG_TRAP(pn->leaf != NULL); 783 BUG_TRAP(pn->leaf != NULL);
796 pn->leaf = &ip6_null_entry; 784 pn->leaf = info->nl_net->ipv6.ip6_null_entry;
797 } 785 }
798#endif 786#endif
799 atomic_inc(&pn->leaf->rt6i_ref); 787 atomic_inc(&pn->leaf->rt6i_ref);
@@ -809,7 +797,7 @@ out:
809 */ 797 */
810st_failure: 798st_failure:
811 if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) 799 if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
812 fib6_repair_tree(fn); 800 fib6_repair_tree(info->nl_net, fn);
813 dst_free(&rt->u.dst); 801 dst_free(&rt->u.dst);
814 return err; 802 return err;
815#endif 803#endif
@@ -975,10 +963,10 @@ struct fib6_node * fib6_locate(struct fib6_node *root,
975 * 963 *
976 */ 964 */
977 965
978static struct rt6_info * fib6_find_prefix(struct fib6_node *fn) 966static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn)
979{ 967{
980 if (fn->fn_flags&RTN_ROOT) 968 if (fn->fn_flags&RTN_ROOT)
981 return &ip6_null_entry; 969 return net->ipv6.ip6_null_entry;
982 970
983 while(fn) { 971 while(fn) {
984 if(fn->left) 972 if(fn->left)
@@ -997,7 +985,8 @@ static struct rt6_info * fib6_find_prefix(struct fib6_node *fn)
997 * is the node we want to try and remove. 985 * is the node we want to try and remove.
998 */ 986 */
999 987
1000static struct fib6_node * fib6_repair_tree(struct fib6_node *fn) 988static struct fib6_node *fib6_repair_tree(struct net *net,
989 struct fib6_node *fn)
1001{ 990{
1002 int children; 991 int children;
1003 int nstate; 992 int nstate;
@@ -1024,11 +1013,11 @@ static struct fib6_node * fib6_repair_tree(struct fib6_node *fn)
1024 || (children && fn->fn_flags&RTN_ROOT) 1013 || (children && fn->fn_flags&RTN_ROOT)
1025#endif 1014#endif
1026 ) { 1015 ) {
1027 fn->leaf = fib6_find_prefix(fn); 1016 fn->leaf = fib6_find_prefix(net, fn);
1028#if RT6_DEBUG >= 2 1017#if RT6_DEBUG >= 2
1029 if (fn->leaf==NULL) { 1018 if (fn->leaf==NULL) {
1030 BUG_TRAP(fn->leaf); 1019 BUG_TRAP(fn->leaf);
1031 fn->leaf = &ip6_null_entry; 1020 fn->leaf = net->ipv6.ip6_null_entry;
1032 } 1021 }
1033#endif 1022#endif
1034 atomic_inc(&fn->leaf->rt6i_ref); 1023 atomic_inc(&fn->leaf->rt6i_ref);
@@ -1101,14 +1090,15 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
1101{ 1090{
1102 struct fib6_walker_t *w; 1091 struct fib6_walker_t *w;
1103 struct rt6_info *rt = *rtp; 1092 struct rt6_info *rt = *rtp;
1093 struct net *net = info->nl_net;
1104 1094
1105 RT6_TRACE("fib6_del_route\n"); 1095 RT6_TRACE("fib6_del_route\n");
1106 1096
1107 /* Unlink it */ 1097 /* Unlink it */
1108 *rtp = rt->u.dst.rt6_next; 1098 *rtp = rt->u.dst.rt6_next;
1109 rt->rt6i_node = NULL; 1099 rt->rt6i_node = NULL;
1110 rt6_stats.fib_rt_entries--; 1100 net->ipv6.rt6_stats->fib_rt_entries--;
1111 rt6_stats.fib_discarded_routes++; 1101 net->ipv6.rt6_stats->fib_discarded_routes++;
1112 1102
1113 /* Reset round-robin state, if necessary */ 1103 /* Reset round-robin state, if necessary */
1114 if (fn->rr_ptr == rt) 1104 if (fn->rr_ptr == rt)
@@ -1131,8 +1121,8 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
1131 /* If it was last route, expunge its radix tree node */ 1121 /* If it was last route, expunge its radix tree node */
1132 if (fn->leaf == NULL) { 1122 if (fn->leaf == NULL) {
1133 fn->fn_flags &= ~RTN_RTINFO; 1123 fn->fn_flags &= ~RTN_RTINFO;
1134 rt6_stats.fib_route_nodes--; 1124 net->ipv6.rt6_stats->fib_route_nodes--;
1135 fn = fib6_repair_tree(fn); 1125 fn = fib6_repair_tree(net, fn);
1136 } 1126 }
1137 1127
1138 if (atomic_read(&rt->rt6i_ref) != 1) { 1128 if (atomic_read(&rt->rt6i_ref) != 1) {
@@ -1144,7 +1134,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
1144 */ 1134 */
1145 while (fn) { 1135 while (fn) {
1146 if (!(fn->fn_flags&RTN_RTINFO) && fn->leaf == rt) { 1136 if (!(fn->fn_flags&RTN_RTINFO) && fn->leaf == rt) {
1147 fn->leaf = fib6_find_prefix(fn); 1137 fn->leaf = fib6_find_prefix(net, fn);
1148 atomic_inc(&fn->leaf->rt6i_ref); 1138 atomic_inc(&fn->leaf->rt6i_ref);
1149 rt6_release(rt); 1139 rt6_release(rt);
1150 } 1140 }
@@ -1160,6 +1150,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
1160 1150
1161int fib6_del(struct rt6_info *rt, struct nl_info *info) 1151int fib6_del(struct rt6_info *rt, struct nl_info *info)
1162{ 1152{
1153 struct net *net = info->nl_net;
1163 struct fib6_node *fn = rt->rt6i_node; 1154 struct fib6_node *fn = rt->rt6i_node;
1164 struct rt6_info **rtp; 1155 struct rt6_info **rtp;
1165 1156
@@ -1169,7 +1160,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
1169 return -ENOENT; 1160 return -ENOENT;
1170 } 1161 }
1171#endif 1162#endif
1172 if (fn == NULL || rt == &ip6_null_entry) 1163 if (fn == NULL || rt == net->ipv6.ip6_null_entry)
1173 return -ENOENT; 1164 return -ENOENT;
1174 1165
1175 BUG_TRAP(fn->fn_flags&RTN_RTINFO); 1166 BUG_TRAP(fn->fn_flags&RTN_RTINFO);
@@ -1184,7 +1175,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
1184 pn = pn->parent; 1175 pn = pn->parent;
1185 } 1176 }
1186#endif 1177#endif
1187 fib6_prune_clones(pn, rt); 1178 fib6_prune_clones(info->nl_net, pn, rt);
1188 } 1179 }
1189 1180
1190 /* 1181 /*
@@ -1314,12 +1305,12 @@ static int fib6_walk(struct fib6_walker_t *w)
1314 1305
1315static int fib6_clean_node(struct fib6_walker_t *w) 1306static int fib6_clean_node(struct fib6_walker_t *w)
1316{ 1307{
1317 struct nl_info info = {
1318 .nl_net = &init_net,
1319 };
1320 int res; 1308 int res;
1321 struct rt6_info *rt; 1309 struct rt6_info *rt;
1322 struct fib6_cleaner_t *c = container_of(w, struct fib6_cleaner_t, w); 1310 struct fib6_cleaner_t *c = container_of(w, struct fib6_cleaner_t, w);
1311 struct nl_info info = {
1312 .nl_net = c->net,
1313 };
1323 1314
1324 for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) { 1315 for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
1325 res = c->func(rt, c->arg); 1316 res = c->func(rt, c->arg);
@@ -1351,7 +1342,7 @@ static int fib6_clean_node(struct fib6_walker_t *w)
1351 * ignoring pure split nodes) will be scanned. 1342 * ignoring pure split nodes) will be scanned.
1352 */ 1343 */
1353 1344
1354static void fib6_clean_tree(struct fib6_node *root, 1345static void fib6_clean_tree(struct net *net, struct fib6_node *root,
1355 int (*func)(struct rt6_info *, void *arg), 1346 int (*func)(struct rt6_info *, void *arg),
1356 int prune, void *arg) 1347 int prune, void *arg)
1357{ 1348{
@@ -1362,23 +1353,26 @@ static void fib6_clean_tree(struct fib6_node *root,
1362 c.w.prune = prune; 1353 c.w.prune = prune;
1363 c.func = func; 1354 c.func = func;
1364 c.arg = arg; 1355 c.arg = arg;
1356 c.net = net;
1365 1357
1366 fib6_walk(&c.w); 1358 fib6_walk(&c.w);
1367} 1359}
1368 1360
1369void fib6_clean_all(int (*func)(struct rt6_info *, void *arg), 1361void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
1370 int prune, void *arg) 1362 int prune, void *arg)
1371{ 1363{
1372 struct fib6_table *table; 1364 struct fib6_table *table;
1373 struct hlist_node *node; 1365 struct hlist_node *node;
1366 struct hlist_head *head;
1374 unsigned int h; 1367 unsigned int h;
1375 1368
1376 rcu_read_lock(); 1369 rcu_read_lock();
1377 for (h = 0; h < FIB_TABLE_HASHSZ; h++) { 1370 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
1378 hlist_for_each_entry_rcu(table, node, &fib_table_hash[h], 1371 head = &net->ipv6.fib_table_hash[h];
1379 tb6_hlist) { 1372 hlist_for_each_entry_rcu(table, node, head, tb6_hlist) {
1380 write_lock_bh(&table->tb6_lock); 1373 write_lock_bh(&table->tb6_lock);
1381 fib6_clean_tree(&table->tb6_root, func, prune, arg); 1374 fib6_clean_tree(net, &table->tb6_root,
1375 func, prune, arg);
1382 write_unlock_bh(&table->tb6_lock); 1376 write_unlock_bh(&table->tb6_lock);
1383 } 1377 }
1384 } 1378 }
@@ -1395,9 +1389,10 @@ static int fib6_prune_clone(struct rt6_info *rt, void *arg)
1395 return 0; 1389 return 0;
1396} 1390}
1397 1391
1398static void fib6_prune_clones(struct fib6_node *fn, struct rt6_info *rt) 1392static void fib6_prune_clones(struct net *net, struct fib6_node *fn,
1393 struct rt6_info *rt)
1399{ 1394{
1400 fib6_clean_tree(fn, fib6_prune_clone, 1, rt); 1395 fib6_clean_tree(net, fn, fib6_prune_clone, 1, rt);
1401} 1396}
1402 1397
1403/* 1398/*
@@ -1447,54 +1442,145 @@ static int fib6_age(struct rt6_info *rt, void *arg)
1447 1442
1448static DEFINE_SPINLOCK(fib6_gc_lock); 1443static DEFINE_SPINLOCK(fib6_gc_lock);
1449 1444
1450void fib6_run_gc(unsigned long dummy) 1445void fib6_run_gc(unsigned long expires, struct net *net)
1451{ 1446{
1452 if (dummy != ~0UL) { 1447 if (expires != ~0UL) {
1453 spin_lock_bh(&fib6_gc_lock); 1448 spin_lock_bh(&fib6_gc_lock);
1454 gc_args.timeout = dummy ? (int)dummy : 1449 gc_args.timeout = expires ? (int)expires :
1455 init_net.ipv6.sysctl.ip6_rt_gc_interval; 1450 net->ipv6.sysctl.ip6_rt_gc_interval;
1456 } else { 1451 } else {
1457 local_bh_disable(); 1452 local_bh_disable();
1458 if (!spin_trylock(&fib6_gc_lock)) { 1453 if (!spin_trylock(&fib6_gc_lock)) {
1459 mod_timer(&ip6_fib_timer, jiffies + HZ); 1454 mod_timer(net->ipv6.ip6_fib_timer, jiffies + HZ);
1460 local_bh_enable(); 1455 local_bh_enable();
1461 return; 1456 return;
1462 } 1457 }
1463 gc_args.timeout = init_net.ipv6.sysctl.ip6_rt_gc_interval; 1458 gc_args.timeout = net->ipv6.sysctl.ip6_rt_gc_interval;
1464 } 1459 }
1465 gc_args.more = 0; 1460 gc_args.more = 0;
1466 1461
1467 ndisc_dst_gc(&gc_args.more); 1462 icmp6_dst_gc(&gc_args.more);
1468 fib6_clean_all(fib6_age, 0, NULL); 1463
1464 fib6_clean_all(net, fib6_age, 0, NULL);
1469 1465
1470 if (gc_args.more) 1466 if (gc_args.more)
1471 mod_timer(&ip6_fib_timer, jiffies + 1467 mod_timer(net->ipv6.ip6_fib_timer, jiffies +
1472 init_net.ipv6.sysctl.ip6_rt_gc_interval); 1468 net->ipv6.sysctl.ip6_rt_gc_interval);
1473 else { 1469 else {
1474 del_timer(&ip6_fib_timer); 1470 del_timer(net->ipv6.ip6_fib_timer);
1475 ip6_fib_timer.expires = 0; 1471 net->ipv6.ip6_fib_timer->expires = 0;
1476 } 1472 }
1477 spin_unlock_bh(&fib6_gc_lock); 1473 spin_unlock_bh(&fib6_gc_lock);
1478} 1474}
1479 1475
1480int __init fib6_init(void) 1476static void fib6_gc_timer_cb(unsigned long arg)
1477{
1478 fib6_run_gc(0, (struct net *)arg);
1479}
1480
1481static int fib6_net_init(struct net *net)
1481{ 1482{
1482 int ret; 1483 int ret;
1484 struct timer_list *timer;
1485
1486 ret = -ENOMEM;
1487 timer = kzalloc(sizeof(*timer), GFP_KERNEL);
1488 if (!timer)
1489 goto out;
1490
1491 setup_timer(timer, fib6_gc_timer_cb, (unsigned long)net);
1492 net->ipv6.ip6_fib_timer = timer;
1493
1494 net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL);
1495 if (!net->ipv6.rt6_stats)
1496 goto out_timer;
1497
1498 net->ipv6.fib_table_hash =
1499 kzalloc(sizeof(*net->ipv6.fib_table_hash)*FIB_TABLE_HASHSZ,
1500 GFP_KERNEL);
1501 if (!net->ipv6.fib_table_hash)
1502 goto out_rt6_stats;
1503
1504 net->ipv6.fib6_main_tbl = kzalloc(sizeof(*net->ipv6.fib6_main_tbl),
1505 GFP_KERNEL);
1506 if (!net->ipv6.fib6_main_tbl)
1507 goto out_fib_table_hash;
1508
1509 net->ipv6.fib6_main_tbl->tb6_id = RT6_TABLE_MAIN;
1510 net->ipv6.fib6_main_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
1511 net->ipv6.fib6_main_tbl->tb6_root.fn_flags =
1512 RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
1513
1514#ifdef CONFIG_IPV6_MULTIPLE_TABLES
1515 net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl),
1516 GFP_KERNEL);
1517 if (!net->ipv6.fib6_local_tbl)
1518 goto out_fib6_main_tbl;
1519 net->ipv6.fib6_local_tbl->tb6_id = RT6_TABLE_LOCAL;
1520 net->ipv6.fib6_local_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
1521 net->ipv6.fib6_local_tbl->tb6_root.fn_flags =
1522 RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
1523#endif
1524 fib6_tables_init(net);
1525
1526 ret = 0;
1527out:
1528 return ret;
1529
1530#ifdef CONFIG_IPV6_MULTIPLE_TABLES
1531out_fib6_main_tbl:
1532 kfree(net->ipv6.fib6_main_tbl);
1533#endif
1534out_fib_table_hash:
1535 kfree(net->ipv6.fib_table_hash);
1536out_rt6_stats:
1537 kfree(net->ipv6.rt6_stats);
1538out_timer:
1539 kfree(timer);
1540 goto out;
1541 }
1542
1543static void fib6_net_exit(struct net *net)
1544{
1545 rt6_ifdown(net, NULL);
1546 del_timer(net->ipv6.ip6_fib_timer);
1547 kfree(net->ipv6.ip6_fib_timer);
1548#ifdef CONFIG_IPV6_MULTIPLE_TABLES
1549 kfree(net->ipv6.fib6_local_tbl);
1550#endif
1551 kfree(net->ipv6.fib6_main_tbl);
1552 kfree(net->ipv6.fib_table_hash);
1553 kfree(net->ipv6.rt6_stats);
1554}
1555
1556static struct pernet_operations fib6_net_ops = {
1557 .init = fib6_net_init,
1558 .exit = fib6_net_exit,
1559};
1560
1561int __init fib6_init(void)
1562{
1563 int ret = -ENOMEM;
1564
1483 fib6_node_kmem = kmem_cache_create("fib6_nodes", 1565 fib6_node_kmem = kmem_cache_create("fib6_nodes",
1484 sizeof(struct fib6_node), 1566 sizeof(struct fib6_node),
1485 0, SLAB_HWCACHE_ALIGN, 1567 0, SLAB_HWCACHE_ALIGN,
1486 NULL); 1568 NULL);
1487 if (!fib6_node_kmem) 1569 if (!fib6_node_kmem)
1488 return -ENOMEM; 1570 goto out;
1489 1571
1490 fib6_tables_init(); 1572 ret = register_pernet_subsys(&fib6_net_ops);
1573 if (ret)
1574 goto out_kmem_cache_create;
1491 1575
1492 ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib); 1576 ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib);
1493 if (ret) 1577 if (ret)
1494 goto out_kmem_cache_create; 1578 goto out_unregister_subsys;
1495out: 1579out:
1496 return ret; 1580 return ret;
1497 1581
1582out_unregister_subsys:
1583 unregister_pernet_subsys(&fib6_net_ops);
1498out_kmem_cache_create: 1584out_kmem_cache_create:
1499 kmem_cache_destroy(fib6_node_kmem); 1585 kmem_cache_destroy(fib6_node_kmem);
1500 goto out; 1586 goto out;
@@ -1502,6 +1588,6 @@ out_kmem_cache_create:
1502 1588
1503void fib6_gc_cleanup(void) 1589void fib6_gc_cleanup(void)
1504{ 1590{
1505 del_timer(&ip6_fib_timer); 1591 unregister_pernet_subsys(&fib6_net_ops);
1506 kmem_cache_destroy(fib6_node_kmem); 1592 kmem_cache_destroy(fib6_node_kmem);
1507} 1593}
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 2b7d9ee98832..eb7a940310f4 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -62,23 +62,23 @@ static DEFINE_RWLOCK(ip6_fl_lock);
62static DEFINE_RWLOCK(ip6_sk_fl_lock); 62static DEFINE_RWLOCK(ip6_sk_fl_lock);
63 63
64 64
65static __inline__ struct ip6_flowlabel * __fl_lookup(__be32 label) 65static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
66{ 66{
67 struct ip6_flowlabel *fl; 67 struct ip6_flowlabel *fl;
68 68
69 for (fl=fl_ht[FL_HASH(label)]; fl; fl = fl->next) { 69 for (fl=fl_ht[FL_HASH(label)]; fl; fl = fl->next) {
70 if (fl->label == label) 70 if (fl->label == label && fl->fl_net == net)
71 return fl; 71 return fl;
72 } 72 }
73 return NULL; 73 return NULL;
74} 74}
75 75
76static struct ip6_flowlabel * fl_lookup(__be32 label) 76static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
77{ 77{
78 struct ip6_flowlabel *fl; 78 struct ip6_flowlabel *fl;
79 79
80 read_lock_bh(&ip6_fl_lock); 80 read_lock_bh(&ip6_fl_lock);
81 fl = __fl_lookup(label); 81 fl = __fl_lookup(net, label);
82 if (fl) 82 if (fl)
83 atomic_inc(&fl->users); 83 atomic_inc(&fl->users);
84 read_unlock_bh(&ip6_fl_lock); 84 read_unlock_bh(&ip6_fl_lock);
@@ -88,8 +88,10 @@ static struct ip6_flowlabel * fl_lookup(__be32 label)
88 88
89static void fl_free(struct ip6_flowlabel *fl) 89static void fl_free(struct ip6_flowlabel *fl)
90{ 90{
91 if (fl) 91 if (fl) {
92 release_net(fl->fl_net);
92 kfree(fl->opt); 93 kfree(fl->opt);
94 }
93 kfree(fl); 95 kfree(fl);
94} 96}
95 97
@@ -112,7 +114,6 @@ static void fl_release(struct ip6_flowlabel *fl)
112 time_after(ip6_fl_gc_timer.expires, ttd)) 114 time_after(ip6_fl_gc_timer.expires, ttd))
113 mod_timer(&ip6_fl_gc_timer, ttd); 115 mod_timer(&ip6_fl_gc_timer, ttd);
114 } 116 }
115
116 write_unlock_bh(&ip6_fl_lock); 117 write_unlock_bh(&ip6_fl_lock);
117} 118}
118 119
@@ -148,13 +149,34 @@ static void ip6_fl_gc(unsigned long dummy)
148 if (!sched && atomic_read(&fl_size)) 149 if (!sched && atomic_read(&fl_size))
149 sched = now + FL_MAX_LINGER; 150 sched = now + FL_MAX_LINGER;
150 if (sched) { 151 if (sched) {
151 ip6_fl_gc_timer.expires = sched; 152 mod_timer(&ip6_fl_gc_timer, sched);
152 add_timer(&ip6_fl_gc_timer); 153 }
154 write_unlock(&ip6_fl_lock);
155}
156
157static void ip6_fl_purge(struct net *net)
158{
159 int i;
160
161 write_lock(&ip6_fl_lock);
162 for (i = 0; i <= FL_HASH_MASK; i++) {
163 struct ip6_flowlabel *fl, **flp;
164 flp = &fl_ht[i];
165 while ((fl = *flp) != NULL) {
166 if (fl->fl_net == net && atomic_read(&fl->users) == 0) {
167 *flp = fl->next;
168 fl_free(fl);
169 atomic_dec(&fl_size);
170 continue;
171 }
172 flp = &fl->next;
173 }
153 } 174 }
154 write_unlock(&ip6_fl_lock); 175 write_unlock(&ip6_fl_lock);
155} 176}
156 177
157static struct ip6_flowlabel *fl_intern(struct ip6_flowlabel *fl, __be32 label) 178static struct ip6_flowlabel *fl_intern(struct net *net,
179 struct ip6_flowlabel *fl, __be32 label)
158{ 180{
159 struct ip6_flowlabel *lfl; 181 struct ip6_flowlabel *lfl;
160 182
@@ -165,7 +187,7 @@ static struct ip6_flowlabel *fl_intern(struct ip6_flowlabel *fl, __be32 label)
165 for (;;) { 187 for (;;) {
166 fl->label = htonl(net_random())&IPV6_FLOWLABEL_MASK; 188 fl->label = htonl(net_random())&IPV6_FLOWLABEL_MASK;
167 if (fl->label) { 189 if (fl->label) {
168 lfl = __fl_lookup(fl->label); 190 lfl = __fl_lookup(net, fl->label);
169 if (lfl == NULL) 191 if (lfl == NULL)
170 break; 192 break;
171 } 193 }
@@ -179,7 +201,7 @@ static struct ip6_flowlabel *fl_intern(struct ip6_flowlabel *fl, __be32 label)
179 * done in ipv6_flowlabel_opt - sock is locked, so new entry 201 * done in ipv6_flowlabel_opt - sock is locked, so new entry
180 * with the same label can only appear on another sock 202 * with the same label can only appear on another sock
181 */ 203 */
182 lfl = __fl_lookup(fl->label); 204 lfl = __fl_lookup(net, fl->label);
183 if (lfl != NULL) { 205 if (lfl != NULL) {
184 atomic_inc(&lfl->users); 206 atomic_inc(&lfl->users);
185 write_unlock_bh(&ip6_fl_lock); 207 write_unlock_bh(&ip6_fl_lock);
@@ -298,7 +320,8 @@ static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned lo
298} 320}
299 321
300static struct ip6_flowlabel * 322static struct ip6_flowlabel *
301fl_create(struct in6_flowlabel_req *freq, char __user *optval, int optlen, int *err_p) 323fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
324 int optlen, int *err_p)
302{ 325{
303 struct ip6_flowlabel *fl; 326 struct ip6_flowlabel *fl;
304 int olen; 327 int olen;
@@ -343,6 +366,7 @@ fl_create(struct in6_flowlabel_req *freq, char __user *optval, int optlen, int *
343 } 366 }
344 } 367 }
345 368
369 fl->fl_net = hold_net(net);
346 fl->expires = jiffies; 370 fl->expires = jiffies;
347 err = fl6_renew(fl, freq->flr_linger, freq->flr_expires); 371 err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
348 if (err) 372 if (err)
@@ -441,6 +465,7 @@ static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
441int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) 465int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
442{ 466{
443 int err; 467 int err;
468 struct net *net = sock_net(sk);
444 struct ipv6_pinfo *np = inet6_sk(sk); 469 struct ipv6_pinfo *np = inet6_sk(sk);
445 struct in6_flowlabel_req freq; 470 struct in6_flowlabel_req freq;
446 struct ipv6_fl_socklist *sfl1=NULL; 471 struct ipv6_fl_socklist *sfl1=NULL;
@@ -483,7 +508,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
483 read_unlock_bh(&ip6_sk_fl_lock); 508 read_unlock_bh(&ip6_sk_fl_lock);
484 509
485 if (freq.flr_share == IPV6_FL_S_NONE && capable(CAP_NET_ADMIN)) { 510 if (freq.flr_share == IPV6_FL_S_NONE && capable(CAP_NET_ADMIN)) {
486 fl = fl_lookup(freq.flr_label); 511 fl = fl_lookup(net, freq.flr_label);
487 if (fl) { 512 if (fl) {
488 err = fl6_renew(fl, freq.flr_linger, freq.flr_expires); 513 err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
489 fl_release(fl); 514 fl_release(fl);
@@ -496,7 +521,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
496 if (freq.flr_label & ~IPV6_FLOWLABEL_MASK) 521 if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
497 return -EINVAL; 522 return -EINVAL;
498 523
499 fl = fl_create(&freq, optval, optlen, &err); 524 fl = fl_create(net, &freq, optval, optlen, &err);
500 if (fl == NULL) 525 if (fl == NULL)
501 return err; 526 return err;
502 sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL); 527 sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
@@ -518,7 +543,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
518 read_unlock_bh(&ip6_sk_fl_lock); 543 read_unlock_bh(&ip6_sk_fl_lock);
519 544
520 if (fl1 == NULL) 545 if (fl1 == NULL)
521 fl1 = fl_lookup(freq.flr_label); 546 fl1 = fl_lookup(net, freq.flr_label);
522 if (fl1) { 547 if (fl1) {
523recheck: 548recheck:
524 err = -EEXIST; 549 err = -EEXIST;
@@ -559,7 +584,7 @@ release:
559 if (sfl1 == NULL || (err = mem_check(sk)) != 0) 584 if (sfl1 == NULL || (err = mem_check(sk)) != 0)
560 goto done; 585 goto done;
561 586
562 fl1 = fl_intern(fl, freq.flr_label); 587 fl1 = fl_intern(net, fl, freq.flr_label);
563 if (fl1 != NULL) 588 if (fl1 != NULL)
564 goto recheck; 589 goto recheck;
565 590
@@ -586,6 +611,7 @@ done:
586#ifdef CONFIG_PROC_FS 611#ifdef CONFIG_PROC_FS
587 612
588struct ip6fl_iter_state { 613struct ip6fl_iter_state {
614 struct seq_net_private p;
589 int bucket; 615 int bucket;
590}; 616};
591 617
@@ -595,12 +621,15 @@ static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
595{ 621{
596 struct ip6_flowlabel *fl = NULL; 622 struct ip6_flowlabel *fl = NULL;
597 struct ip6fl_iter_state *state = ip6fl_seq_private(seq); 623 struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
624 struct net *net = seq_file_net(seq);
598 625
599 for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) { 626 for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
600 if (fl_ht[state->bucket]) { 627 fl = fl_ht[state->bucket];
601 fl = fl_ht[state->bucket]; 628
629 while (fl && fl->fl_net != net)
630 fl = fl->next;
631 if (fl)
602 break; 632 break;
603 }
604 } 633 }
605 return fl; 634 return fl;
606} 635}
@@ -608,12 +637,18 @@ static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
608static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl) 637static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
609{ 638{
610 struct ip6fl_iter_state *state = ip6fl_seq_private(seq); 639 struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
640 struct net *net = seq_file_net(seq);
611 641
612 fl = fl->next; 642 fl = fl->next;
643try_again:
644 while (fl && fl->fl_net != net)
645 fl = fl->next;
646
613 while (!fl) { 647 while (!fl) {
614 if (++state->bucket <= FL_HASH_MASK) 648 if (++state->bucket <= FL_HASH_MASK) {
615 fl = fl_ht[state->bucket]; 649 fl = fl_ht[state->bucket];
616 else 650 goto try_again;
651 } else
617 break; 652 break;
618 } 653 }
619 return fl; 654 return fl;
@@ -683,8 +718,8 @@ static const struct seq_operations ip6fl_seq_ops = {
683 718
684static int ip6fl_seq_open(struct inode *inode, struct file *file) 719static int ip6fl_seq_open(struct inode *inode, struct file *file)
685{ 720{
686 return seq_open_private(file, &ip6fl_seq_ops, 721 return seq_open_net(inode, file, &ip6fl_seq_ops,
687 sizeof(struct ip6fl_iter_state)); 722 sizeof(struct ip6fl_iter_state));
688} 723}
689 724
690static const struct file_operations ip6fl_seq_fops = { 725static const struct file_operations ip6fl_seq_fops = {
@@ -692,12 +727,13 @@ static const struct file_operations ip6fl_seq_fops = {
692 .open = ip6fl_seq_open, 727 .open = ip6fl_seq_open,
693 .read = seq_read, 728 .read = seq_read,
694 .llseek = seq_lseek, 729 .llseek = seq_lseek,
695 .release = seq_release_private, 730 .release = seq_release_net,
696}; 731};
697 732
698static int ip6_flowlabel_proc_init(struct net *net) 733static int ip6_flowlabel_proc_init(struct net *net)
699{ 734{
700 if (!proc_net_fops_create(net, "ip6_flowlabel", S_IRUGO, &ip6fl_seq_fops)) 735 if (!proc_net_fops_create(net, "ip6_flowlabel",
736 S_IRUGO, &ip6fl_seq_fops))
701 return -ENOMEM; 737 return -ENOMEM;
702 return 0; 738 return 0;
703} 739}
@@ -717,13 +753,24 @@ static inline void ip6_flowlabel_proc_fini(struct net *net)
717} 753}
718#endif 754#endif
719 755
756static inline void ip6_flowlabel_net_exit(struct net *net)
757{
758 ip6_fl_purge(net);
759 ip6_flowlabel_proc_fini(net);
760}
761
762static struct pernet_operations ip6_flowlabel_net_ops = {
763 .init = ip6_flowlabel_proc_init,
764 .exit = ip6_flowlabel_net_exit,
765};
766
720int ip6_flowlabel_init(void) 767int ip6_flowlabel_init(void)
721{ 768{
722 return ip6_flowlabel_proc_init(&init_net); 769 return register_pernet_subsys(&ip6_flowlabel_net_ops);
723} 770}
724 771
725void ip6_flowlabel_cleanup(void) 772void ip6_flowlabel_cleanup(void)
726{ 773{
727 del_timer(&ip6_fl_gc_timer); 774 del_timer(&ip6_fl_gc_timer);
728 ip6_flowlabel_proc_fini(&init_net); 775 unregister_pernet_subsys(&ip6_flowlabel_net_ops);
729} 776}
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 98ab4f459905..4e5c8615832c 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -29,6 +29,7 @@
29#include <linux/netdevice.h> 29#include <linux/netdevice.h>
30#include <linux/in6.h> 30#include <linux/in6.h>
31#include <linux/icmpv6.h> 31#include <linux/icmpv6.h>
32#include <linux/mroute6.h>
32 33
33#include <linux/netfilter.h> 34#include <linux/netfilter.h>
34#include <linux/netfilter_ipv6.h> 35#include <linux/netfilter_ipv6.h>
@@ -61,11 +62,6 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
61 u32 pkt_len; 62 u32 pkt_len;
62 struct inet6_dev *idev; 63 struct inet6_dev *idev;
63 64
64 if (dev->nd_net != &init_net) {
65 kfree_skb(skb);
66 return 0;
67 }
68
69 if (skb->pkt_type == PACKET_OTHERHOST) { 65 if (skb->pkt_type == PACKET_OTHERHOST) {
70 kfree_skb(skb); 66 kfree_skb(skb);
71 return 0; 67 return 0;
@@ -241,38 +237,84 @@ int ip6_mc_input(struct sk_buff *skb)
241 hdr = ipv6_hdr(skb); 237 hdr = ipv6_hdr(skb);
242 deliver = ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL); 238 deliver = ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL);
243 239
240#ifdef CONFIG_IPV6_MROUTE
244 /* 241 /*
245 * IPv6 multicast router mode isnt currently supported. 242 * IPv6 multicast router mode is now supported ;)
246 */ 243 */
247#if 0 244 if (ipv6_devconf.mc_forwarding &&
248 if (ipv6_config.multicast_route) { 245 likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
249 int addr_type; 246 /*
250 247 * Okay, we try to forward - split and duplicate
251 addr_type = ipv6_addr_type(&hdr->daddr); 248 * packets.
252 249 */
253 if (!(addr_type & (IPV6_ADDR_LOOPBACK | IPV6_ADDR_LINKLOCAL))) { 250 struct sk_buff *skb2;
254 struct sk_buff *skb2; 251 struct inet6_skb_parm *opt = IP6CB(skb);
255 struct dst_entry *dst; 252
253 /* Check for MLD */
254 if (unlikely(opt->ra)) {
255 /* Check if this is a mld message */
256 u8 *ptr = skb_network_header(skb) + opt->ra;
257 struct icmp6hdr *icmp6;
258 u8 nexthdr = hdr->nexthdr;
259 int offset;
260
261 /* Check if the value of Router Alert
262 * is for MLD (0x0000).
263 */
264 if ((ptr[2] | ptr[3]) == 0) {
265 deliver = 0;
266
267 if (!ipv6_ext_hdr(nexthdr)) {
268 /* BUG */
269 goto out;
270 }
271 offset = ipv6_skip_exthdr(skb, sizeof(*hdr),
272 &nexthdr);
273 if (offset < 0)
274 goto out;
275
276 if (nexthdr != IPPROTO_ICMPV6)
277 goto out;
278
279 if (!pskb_may_pull(skb, (skb_network_header(skb) +
280 offset + 1 - skb->data)))
281 goto out;
282
283 icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
284
285 switch (icmp6->icmp6_type) {
286 case ICMPV6_MGM_QUERY:
287 case ICMPV6_MGM_REPORT:
288 case ICMPV6_MGM_REDUCTION:
289 case ICMPV6_MLD2_REPORT:
290 deliver = 1;
291 break;
292 }
293 goto out;
294 }
295 /* unknown RA - process it normally */
296 }
256 297
257 dst = skb->dst; 298 if (deliver)
299 skb2 = skb_clone(skb, GFP_ATOMIC);
300 else {
301 skb2 = skb;
302 skb = NULL;
303 }
258 304
259 if (deliver) { 305 if (skb2) {
260 skb2 = skb_clone(skb, GFP_ATOMIC); 306 skb2->dev = skb2->dst->dev;
261 dst_output(skb2); 307 ip6_mr_input(skb2);
262 } else {
263 dst_output(skb);
264 return 0;
265 }
266 } 308 }
267 } 309 }
310out:
268#endif 311#endif
269 312 if (likely(deliver))
270 if (likely(deliver)) {
271 ip6_input(skb); 313 ip6_input(skb);
272 return 0; 314 else {
315 /* discard */
316 kfree_skb(skb);
273 } 317 }
274 /* discard */
275 kfree_skb(skb);
276 318
277 return 0; 319 return 0;
278} 320}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 8b67ca07467d..0af2e055f883 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -55,6 +55,7 @@
55#include <net/icmp.h> 55#include <net/icmp.h>
56#include <net/xfrm.h> 56#include <net/xfrm.h>
57#include <net/checksum.h> 57#include <net/checksum.h>
58#include <linux/mroute6.h>
58 59
59static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); 60static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
60 61
@@ -137,8 +138,9 @@ static int ip6_output2(struct sk_buff *skb)
137 struct inet6_dev *idev = ip6_dst_idev(skb->dst); 138 struct inet6_dev *idev = ip6_dst_idev(skb->dst);
138 139
139 if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) && 140 if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
140 ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, 141 ((mroute6_socket && !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
141 &ipv6_hdr(skb)->saddr)) { 142 ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
143 &ipv6_hdr(skb)->saddr))) {
142 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); 144 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
143 145
144 /* Do not check for IFF_ALLMULTI; multicast routing 146 /* Do not check for IFF_ALLMULTI; multicast routing
@@ -237,9 +239,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
237 if (np) 239 if (np)
238 hlimit = np->hop_limit; 240 hlimit = np->hop_limit;
239 if (hlimit < 0) 241 if (hlimit < 0)
240 hlimit = dst_metric(dst, RTAX_HOPLIMIT); 242 hlimit = ip6_dst_hoplimit(dst);
241 if (hlimit < 0)
242 hlimit = ipv6_get_hoplimit(dst->dev);
243 243
244 tclass = -1; 244 tclass = -1;
245 if (np) 245 if (np)
@@ -286,7 +286,7 @@ EXPORT_SYMBOL(ip6_xmit);
286 */ 286 */
287 287
288int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev, 288int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
289 struct in6_addr *saddr, struct in6_addr *daddr, 289 const struct in6_addr *saddr, const struct in6_addr *daddr,
290 int proto, int len) 290 int proto, int len)
291{ 291{
292 struct ipv6_pinfo *np = inet6_sk(sk); 292 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -404,6 +404,7 @@ int ip6_forward(struct sk_buff *skb)
404 struct dst_entry *dst = skb->dst; 404 struct dst_entry *dst = skb->dst;
405 struct ipv6hdr *hdr = ipv6_hdr(skb); 405 struct ipv6hdr *hdr = ipv6_hdr(skb);
406 struct inet6_skb_parm *opt = IP6CB(skb); 406 struct inet6_skb_parm *opt = IP6CB(skb);
407 struct net *net = dev_net(dst->dev);
407 408
408 if (ipv6_devconf.forwarding == 0) 409 if (ipv6_devconf.forwarding == 0)
409 goto error; 410 goto error;
@@ -450,7 +451,7 @@ int ip6_forward(struct sk_buff *skb)
450 451
451 /* XXX: idev->cnf.proxy_ndp? */ 452 /* XXX: idev->cnf.proxy_ndp? */
452 if (ipv6_devconf.proxy_ndp && 453 if (ipv6_devconf.proxy_ndp &&
453 pneigh_lookup(&nd_tbl, &init_net, &hdr->daddr, skb->dev, 0)) { 454 pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
454 int proxied = ip6_forward_proxy_check(skb); 455 int proxied = ip6_forward_proxy_check(skb);
455 if (proxied > 0) 456 if (proxied > 0)
456 return ip6_input(skb); 457 return ip6_input(skb);
@@ -596,7 +597,6 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
596 597
597 return offset; 598 return offset;
598} 599}
599EXPORT_SYMBOL_GPL(ip6_find_1stfragopt);
600 600
601static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) 601static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
602{ 602{
@@ -912,15 +912,19 @@ static int ip6_dst_lookup_tail(struct sock *sk,
912 struct dst_entry **dst, struct flowi *fl) 912 struct dst_entry **dst, struct flowi *fl)
913{ 913{
914 int err; 914 int err;
915 struct net *net = sock_net(sk);
915 916
916 if (*dst == NULL) 917 if (*dst == NULL)
917 *dst = ip6_route_output(sk, fl); 918 *dst = ip6_route_output(net, sk, fl);
918 919
919 if ((err = (*dst)->error)) 920 if ((err = (*dst)->error))
920 goto out_err_release; 921 goto out_err_release;
921 922
922 if (ipv6_addr_any(&fl->fl6_src)) { 923 if (ipv6_addr_any(&fl->fl6_src)) {
923 err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src); 924 err = ipv6_dev_get_saddr(ip6_dst_idev(*dst)->dev,
925 &fl->fl6_dst,
926 sk ? inet6_sk(sk)->srcprefs : 0,
927 &fl->fl6_src);
924 if (err) 928 if (err)
925 goto out_err_release; 929 goto out_err_release;
926 } 930 }
@@ -939,7 +943,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
939 struct flowi fl_gw; 943 struct flowi fl_gw;
940 int redirect; 944 int redirect;
941 945
942 ifp = ipv6_get_ifaddr(&init_net, &fl->fl6_src, 946 ifp = ipv6_get_ifaddr(net, &fl->fl6_src,
943 (*dst)->dev, 1); 947 (*dst)->dev, 1);
944 948
945 redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC); 949 redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
@@ -954,7 +958,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
954 dst_release(*dst); 958 dst_release(*dst);
955 memcpy(&fl_gw, fl, sizeof(struct flowi)); 959 memcpy(&fl_gw, fl, sizeof(struct flowi));
956 memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr)); 960 memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
957 *dst = ip6_route_output(sk, &fl_gw); 961 *dst = ip6_route_output(net, sk, &fl_gw);
958 if ((err = (*dst)->error)) 962 if ((err = (*dst)->error))
959 goto out_err_release; 963 goto out_err_release;
960 } 964 }
@@ -1113,7 +1117,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1113 /* need source address above miyazawa*/ 1117 /* need source address above miyazawa*/
1114 } 1118 }
1115 dst_hold(&rt->u.dst); 1119 dst_hold(&rt->u.dst);
1116 np->cork.rt = rt; 1120 inet->cork.dst = &rt->u.dst;
1117 inet->cork.fl = *fl; 1121 inet->cork.fl = *fl;
1118 np->cork.hop_limit = hlimit; 1122 np->cork.hop_limit = hlimit;
1119 np->cork.tclass = tclass; 1123 np->cork.tclass = tclass;
@@ -1134,7 +1138,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1134 length += exthdrlen; 1138 length += exthdrlen;
1135 transhdrlen += exthdrlen; 1139 transhdrlen += exthdrlen;
1136 } else { 1140 } else {
1137 rt = np->cork.rt; 1141 rt = (struct rt6_info *)inet->cork.dst;
1138 fl = &inet->cork.fl; 1142 fl = &inet->cork.fl;
1139 if (inet->cork.flags & IPCORK_OPT) 1143 if (inet->cork.flags & IPCORK_OPT)
1140 opt = np->cork.opt; 1144 opt = np->cork.opt;
@@ -1379,9 +1383,9 @@ static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
1379 inet->cork.flags &= ~IPCORK_OPT; 1383 inet->cork.flags &= ~IPCORK_OPT;
1380 kfree(np->cork.opt); 1384 kfree(np->cork.opt);
1381 np->cork.opt = NULL; 1385 np->cork.opt = NULL;
1382 if (np->cork.rt) { 1386 if (inet->cork.dst) {
1383 dst_release(&np->cork.rt->u.dst); 1387 dst_release(inet->cork.dst);
1384 np->cork.rt = NULL; 1388 inet->cork.dst = NULL;
1385 inet->cork.flags &= ~IPCORK_ALLFRAG; 1389 inet->cork.flags &= ~IPCORK_ALLFRAG;
1386 } 1390 }
1387 memset(&inet->cork.fl, 0, sizeof(inet->cork.fl)); 1391 memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
@@ -1396,7 +1400,7 @@ int ip6_push_pending_frames(struct sock *sk)
1396 struct ipv6_pinfo *np = inet6_sk(sk); 1400 struct ipv6_pinfo *np = inet6_sk(sk);
1397 struct ipv6hdr *hdr; 1401 struct ipv6hdr *hdr;
1398 struct ipv6_txoptions *opt = np->cork.opt; 1402 struct ipv6_txoptions *opt = np->cork.opt;
1399 struct rt6_info *rt = np->cork.rt; 1403 struct rt6_info *rt = (struct rt6_info *)inet->cork.dst;
1400 struct flowi *fl = &inet->cork.fl; 1404 struct flowi *fl = &inet->cork.fl;
1401 unsigned char proto = fl->proto; 1405 unsigned char proto = fl->proto;
1402 int err = 0; 1406 int err = 0;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 78f438880923..2bda3ba100b1 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -52,6 +52,8 @@
52#include <net/xfrm.h> 52#include <net/xfrm.h>
53#include <net/dsfield.h> 53#include <net/dsfield.h>
54#include <net/inet_ecn.h> 54#include <net/inet_ecn.h>
55#include <net/net_namespace.h>
56#include <net/netns/generic.h>
55 57
56MODULE_AUTHOR("Ville Nuorvala"); 58MODULE_AUTHOR("Ville Nuorvala");
57MODULE_DESCRIPTION("IPv6 tunneling device"); 59MODULE_DESCRIPTION("IPv6 tunneling device");
@@ -60,7 +62,7 @@ MODULE_LICENSE("GPL");
60#define IPV6_TLV_TEL_DST_SIZE 8 62#define IPV6_TLV_TEL_DST_SIZE 8
61 63
62#ifdef IP6_TNL_DEBUG 64#ifdef IP6_TNL_DEBUG
63#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __FUNCTION__) 65#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
64#else 66#else
65#define IP6_TNL_TRACE(x...) do {;} while(0) 67#define IP6_TNL_TRACE(x...) do {;} while(0)
66#endif 68#endif
@@ -78,14 +80,15 @@ static int ip6_fb_tnl_dev_init(struct net_device *dev);
78static int ip6_tnl_dev_init(struct net_device *dev); 80static int ip6_tnl_dev_init(struct net_device *dev);
79static void ip6_tnl_dev_setup(struct net_device *dev); 81static void ip6_tnl_dev_setup(struct net_device *dev);
80 82
81/* the IPv6 tunnel fallback device */ 83static int ip6_tnl_net_id;
82static struct net_device *ip6_fb_tnl_dev; 84struct ip6_tnl_net {
83 85 /* the IPv6 tunnel fallback device */
84 86 struct net_device *fb_tnl_dev;
85/* lists for storing tunnels in use */ 87 /* lists for storing tunnels in use */
86static struct ip6_tnl *tnls_r_l[HASH_SIZE]; 88 struct ip6_tnl *tnls_r_l[HASH_SIZE];
87static struct ip6_tnl *tnls_wc[1]; 89 struct ip6_tnl *tnls_wc[1];
88static struct ip6_tnl **tnls[2] = { tnls_wc, tnls_r_l }; 90 struct ip6_tnl **tnls[2];
91};
89 92
90/* lock for the tunnel lists */ 93/* lock for the tunnel lists */
91static DEFINE_RWLOCK(ip6_tnl_lock); 94static DEFINE_RWLOCK(ip6_tnl_lock);
@@ -130,19 +133,20 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
130 **/ 133 **/
131 134
132static struct ip6_tnl * 135static struct ip6_tnl *
133ip6_tnl_lookup(struct in6_addr *remote, struct in6_addr *local) 136ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
134{ 137{
135 unsigned h0 = HASH(remote); 138 unsigned h0 = HASH(remote);
136 unsigned h1 = HASH(local); 139 unsigned h1 = HASH(local);
137 struct ip6_tnl *t; 140 struct ip6_tnl *t;
141 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
138 142
139 for (t = tnls_r_l[h0 ^ h1]; t; t = t->next) { 143 for (t = ip6n->tnls_r_l[h0 ^ h1]; t; t = t->next) {
140 if (ipv6_addr_equal(local, &t->parms.laddr) && 144 if (ipv6_addr_equal(local, &t->parms.laddr) &&
141 ipv6_addr_equal(remote, &t->parms.raddr) && 145 ipv6_addr_equal(remote, &t->parms.raddr) &&
142 (t->dev->flags & IFF_UP)) 146 (t->dev->flags & IFF_UP))
143 return t; 147 return t;
144 } 148 }
145 if ((t = tnls_wc[0]) != NULL && (t->dev->flags & IFF_UP)) 149 if ((t = ip6n->tnls_wc[0]) != NULL && (t->dev->flags & IFF_UP))
146 return t; 150 return t;
147 151
148 return NULL; 152 return NULL;
@@ -160,7 +164,7 @@ ip6_tnl_lookup(struct in6_addr *remote, struct in6_addr *local)
160 **/ 164 **/
161 165
162static struct ip6_tnl ** 166static struct ip6_tnl **
163ip6_tnl_bucket(struct ip6_tnl_parm *p) 167ip6_tnl_bucket(struct ip6_tnl_net *ip6n, struct ip6_tnl_parm *p)
164{ 168{
165 struct in6_addr *remote = &p->raddr; 169 struct in6_addr *remote = &p->raddr;
166 struct in6_addr *local = &p->laddr; 170 struct in6_addr *local = &p->laddr;
@@ -171,7 +175,7 @@ ip6_tnl_bucket(struct ip6_tnl_parm *p)
171 prio = 1; 175 prio = 1;
172 h = HASH(remote) ^ HASH(local); 176 h = HASH(remote) ^ HASH(local);
173 } 177 }
174 return &tnls[prio][h]; 178 return &ip6n->tnls[prio][h];
175} 179}
176 180
177/** 181/**
@@ -180,9 +184,9 @@ ip6_tnl_bucket(struct ip6_tnl_parm *p)
180 **/ 184 **/
181 185
182static void 186static void
183ip6_tnl_link(struct ip6_tnl *t) 187ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
184{ 188{
185 struct ip6_tnl **tp = ip6_tnl_bucket(&t->parms); 189 struct ip6_tnl **tp = ip6_tnl_bucket(ip6n, &t->parms);
186 190
187 t->next = *tp; 191 t->next = *tp;
188 write_lock_bh(&ip6_tnl_lock); 192 write_lock_bh(&ip6_tnl_lock);
@@ -196,11 +200,11 @@ ip6_tnl_link(struct ip6_tnl *t)
196 **/ 200 **/
197 201
198static void 202static void
199ip6_tnl_unlink(struct ip6_tnl *t) 203ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
200{ 204{
201 struct ip6_tnl **tp; 205 struct ip6_tnl **tp;
202 206
203 for (tp = ip6_tnl_bucket(&t->parms); *tp; tp = &(*tp)->next) { 207 for (tp = ip6_tnl_bucket(ip6n, &t->parms); *tp; tp = &(*tp)->next) {
204 if (t == *tp) { 208 if (t == *tp) {
205 write_lock_bh(&ip6_tnl_lock); 209 write_lock_bh(&ip6_tnl_lock);
206 *tp = t->next; 210 *tp = t->next;
@@ -222,12 +226,13 @@ ip6_tnl_unlink(struct ip6_tnl *t)
222 * created tunnel or NULL 226 * created tunnel or NULL
223 **/ 227 **/
224 228
225static struct ip6_tnl *ip6_tnl_create(struct ip6_tnl_parm *p) 229static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
226{ 230{
227 struct net_device *dev; 231 struct net_device *dev;
228 struct ip6_tnl *t; 232 struct ip6_tnl *t;
229 char name[IFNAMSIZ]; 233 char name[IFNAMSIZ];
230 int err; 234 int err;
235 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
231 236
232 if (p->name[0]) 237 if (p->name[0])
233 strlcpy(name, p->name, IFNAMSIZ); 238 strlcpy(name, p->name, IFNAMSIZ);
@@ -238,6 +243,8 @@ static struct ip6_tnl *ip6_tnl_create(struct ip6_tnl_parm *p)
238 if (dev == NULL) 243 if (dev == NULL)
239 goto failed; 244 goto failed;
240 245
246 dev_net_set(dev, net);
247
241 if (strchr(name, '%')) { 248 if (strchr(name, '%')) {
242 if (dev_alloc_name(dev, name) < 0) 249 if (dev_alloc_name(dev, name) < 0)
243 goto failed_free; 250 goto failed_free;
@@ -251,7 +258,7 @@ static struct ip6_tnl *ip6_tnl_create(struct ip6_tnl_parm *p)
251 goto failed_free; 258 goto failed_free;
252 259
253 dev_hold(dev); 260 dev_hold(dev);
254 ip6_tnl_link(t); 261 ip6_tnl_link(ip6n, t);
255 return t; 262 return t;
256 263
257failed_free: 264failed_free:
@@ -274,20 +281,22 @@ failed:
274 * matching tunnel or NULL 281 * matching tunnel or NULL
275 **/ 282 **/
276 283
277static struct ip6_tnl *ip6_tnl_locate(struct ip6_tnl_parm *p, int create) 284static struct ip6_tnl *ip6_tnl_locate(struct net *net,
285 struct ip6_tnl_parm *p, int create)
278{ 286{
279 struct in6_addr *remote = &p->raddr; 287 struct in6_addr *remote = &p->raddr;
280 struct in6_addr *local = &p->laddr; 288 struct in6_addr *local = &p->laddr;
281 struct ip6_tnl *t; 289 struct ip6_tnl *t;
290 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
282 291
283 for (t = *ip6_tnl_bucket(p); t; t = t->next) { 292 for (t = *ip6_tnl_bucket(ip6n, p); t; t = t->next) {
284 if (ipv6_addr_equal(local, &t->parms.laddr) && 293 if (ipv6_addr_equal(local, &t->parms.laddr) &&
285 ipv6_addr_equal(remote, &t->parms.raddr)) 294 ipv6_addr_equal(remote, &t->parms.raddr))
286 return t; 295 return t;
287 } 296 }
288 if (!create) 297 if (!create)
289 return NULL; 298 return NULL;
290 return ip6_tnl_create(p); 299 return ip6_tnl_create(net, p);
291} 300}
292 301
293/** 302/**
@@ -302,13 +311,15 @@ static void
302ip6_tnl_dev_uninit(struct net_device *dev) 311ip6_tnl_dev_uninit(struct net_device *dev)
303{ 312{
304 struct ip6_tnl *t = netdev_priv(dev); 313 struct ip6_tnl *t = netdev_priv(dev);
314 struct net *net = dev_net(dev);
315 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
305 316
306 if (dev == ip6_fb_tnl_dev) { 317 if (dev == ip6n->fb_tnl_dev) {
307 write_lock_bh(&ip6_tnl_lock); 318 write_lock_bh(&ip6_tnl_lock);
308 tnls_wc[0] = NULL; 319 ip6n->tnls_wc[0] = NULL;
309 write_unlock_bh(&ip6_tnl_lock); 320 write_unlock_bh(&ip6_tnl_lock);
310 } else { 321 } else {
311 ip6_tnl_unlink(t); 322 ip6_tnl_unlink(ip6n, t);
312 } 323 }
313 ip6_tnl_dst_reset(t); 324 ip6_tnl_dst_reset(t);
314 dev_put(dev); 325 dev_put(dev);
@@ -401,7 +412,8 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
401 processing of the error. */ 412 processing of the error. */
402 413
403 read_lock(&ip6_tnl_lock); 414 read_lock(&ip6_tnl_lock);
404 if ((t = ip6_tnl_lookup(&ipv6h->daddr, &ipv6h->saddr)) == NULL) 415 if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr,
416 &ipv6h->saddr)) == NULL)
405 goto out; 417 goto out;
406 418
407 if (t->parms.proto != ipproto && t->parms.proto != 0) 419 if (t->parms.proto != ipproto && t->parms.proto != 0)
@@ -533,7 +545,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
533 fl.fl4_dst = eiph->saddr; 545 fl.fl4_dst = eiph->saddr;
534 fl.fl4_tos = RT_TOS(eiph->tos); 546 fl.fl4_tos = RT_TOS(eiph->tos);
535 fl.proto = IPPROTO_IPIP; 547 fl.proto = IPPROTO_IPIP;
536 if (ip_route_output_key(&init_net, &rt, &fl)) 548 if (ip_route_output_key(dev_net(skb->dev), &rt, &fl))
537 goto out; 549 goto out;
538 550
539 skb2->dev = rt->u.dst.dev; 551 skb2->dev = rt->u.dst.dev;
@@ -545,7 +557,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
545 fl.fl4_dst = eiph->daddr; 557 fl.fl4_dst = eiph->daddr;
546 fl.fl4_src = eiph->saddr; 558 fl.fl4_src = eiph->saddr;
547 fl.fl4_tos = eiph->tos; 559 fl.fl4_tos = eiph->tos;
548 if (ip_route_output_key(&init_net, &rt, &fl) || 560 if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) ||
549 rt->u.dst.dev->type != ARPHRD_TUNNEL) { 561 rt->u.dst.dev->type != ARPHRD_TUNNEL) {
550 ip_rt_put(rt); 562 ip_rt_put(rt);
551 goto out; 563 goto out;
@@ -602,7 +614,8 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
602 skb_reset_network_header(skb2); 614 skb_reset_network_header(skb2);
603 615
604 /* Try to guess incoming interface */ 616 /* Try to guess incoming interface */
605 rt = rt6_lookup(&ipv6_hdr(skb2)->saddr, NULL, 0, 0); 617 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
618 NULL, 0, 0);
606 619
607 if (rt && rt->rt6i_dev) 620 if (rt && rt->rt6i_dev)
608 skb2->dev = rt->rt6i_dev; 621 skb2->dev = rt->rt6i_dev;
@@ -646,16 +659,17 @@ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
646{ 659{
647 struct ip6_tnl_parm *p = &t->parms; 660 struct ip6_tnl_parm *p = &t->parms;
648 int ret = 0; 661 int ret = 0;
662 struct net *net = dev_net(t->dev);
649 663
650 if (p->flags & IP6_TNL_F_CAP_RCV) { 664 if (p->flags & IP6_TNL_F_CAP_RCV) {
651 struct net_device *ldev = NULL; 665 struct net_device *ldev = NULL;
652 666
653 if (p->link) 667 if (p->link)
654 ldev = dev_get_by_index(&init_net, p->link); 668 ldev = dev_get_by_index(net, p->link);
655 669
656 if ((ipv6_addr_is_multicast(&p->laddr) || 670 if ((ipv6_addr_is_multicast(&p->laddr) ||
657 likely(ipv6_chk_addr(&init_net, &p->laddr, ldev, 0))) && 671 likely(ipv6_chk_addr(net, &p->laddr, ldev, 0))) &&
658 likely(!ipv6_chk_addr(&init_net, &p->raddr, NULL, 0))) 672 likely(!ipv6_chk_addr(net, &p->raddr, NULL, 0)))
659 ret = 1; 673 ret = 1;
660 674
661 if (ldev) 675 if (ldev)
@@ -684,7 +698,8 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
684 698
685 read_lock(&ip6_tnl_lock); 699 read_lock(&ip6_tnl_lock);
686 700
687 if ((t = ip6_tnl_lookup(&ipv6h->saddr, &ipv6h->daddr)) != NULL) { 701 if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
702 &ipv6h->daddr)) != NULL) {
688 if (t->parms.proto != ipproto && t->parms.proto != 0) { 703 if (t->parms.proto != ipproto && t->parms.proto != 0) {
689 read_unlock(&ip6_tnl_lock); 704 read_unlock(&ip6_tnl_lock);
690 goto discard; 705 goto discard;
@@ -782,19 +797,20 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
782{ 797{
783 struct ip6_tnl_parm *p = &t->parms; 798 struct ip6_tnl_parm *p = &t->parms;
784 int ret = 0; 799 int ret = 0;
800 struct net *net = dev_net(t->dev);
785 801
786 if (p->flags & IP6_TNL_F_CAP_XMIT) { 802 if (p->flags & IP6_TNL_F_CAP_XMIT) {
787 struct net_device *ldev = NULL; 803 struct net_device *ldev = NULL;
788 804
789 if (p->link) 805 if (p->link)
790 ldev = dev_get_by_index(&init_net, p->link); 806 ldev = dev_get_by_index(net, p->link);
791 807
792 if (unlikely(!ipv6_chk_addr(&init_net, &p->laddr, ldev, 0))) 808 if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0)))
793 printk(KERN_WARNING 809 printk(KERN_WARNING
794 "%s xmit: Local address not yet configured!\n", 810 "%s xmit: Local address not yet configured!\n",
795 p->name); 811 p->name);
796 else if (!ipv6_addr_is_multicast(&p->raddr) && 812 else if (!ipv6_addr_is_multicast(&p->raddr) &&
797 unlikely(ipv6_chk_addr(&init_net, &p->raddr, NULL, 0))) 813 unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0)))
798 printk(KERN_WARNING 814 printk(KERN_WARNING
799 "%s xmit: Routing loop! " 815 "%s xmit: Routing loop! "
800 "Remote address found on this node!\n", 816 "Remote address found on this node!\n",
@@ -847,7 +863,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
847 if ((dst = ip6_tnl_dst_check(t)) != NULL) 863 if ((dst = ip6_tnl_dst_check(t)) != NULL)
848 dst_hold(dst); 864 dst_hold(dst);
849 else { 865 else {
850 dst = ip6_route_output(NULL, fl); 866 dst = ip6_route_output(dev_net(dev), NULL, fl);
851 867
852 if (dst->error || xfrm_lookup(&dst, fl, NULL, 0) < 0) 868 if (dst->error || xfrm_lookup(&dst, fl, NULL, 0) < 0)
853 goto tx_err_link_failure; 869 goto tx_err_link_failure;
@@ -1112,7 +1128,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
1112 int strict = (ipv6_addr_type(&p->raddr) & 1128 int strict = (ipv6_addr_type(&p->raddr) &
1113 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL)); 1129 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1114 1130
1115 struct rt6_info *rt = rt6_lookup(&p->raddr, &p->laddr, 1131 struct rt6_info *rt = rt6_lookup(dev_net(dev),
1132 &p->raddr, &p->laddr,
1116 p->link, strict); 1133 p->link, strict);
1117 1134
1118 if (rt == NULL) 1135 if (rt == NULL)
@@ -1191,15 +1208,17 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1191 int err = 0; 1208 int err = 0;
1192 struct ip6_tnl_parm p; 1209 struct ip6_tnl_parm p;
1193 struct ip6_tnl *t = NULL; 1210 struct ip6_tnl *t = NULL;
1211 struct net *net = dev_net(dev);
1212 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1194 1213
1195 switch (cmd) { 1214 switch (cmd) {
1196 case SIOCGETTUNNEL: 1215 case SIOCGETTUNNEL:
1197 if (dev == ip6_fb_tnl_dev) { 1216 if (dev == ip6n->fb_tnl_dev) {
1198 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) { 1217 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) {
1199 err = -EFAULT; 1218 err = -EFAULT;
1200 break; 1219 break;
1201 } 1220 }
1202 t = ip6_tnl_locate(&p, 0); 1221 t = ip6_tnl_locate(net, &p, 0);
1203 } 1222 }
1204 if (t == NULL) 1223 if (t == NULL)
1205 t = netdev_priv(dev); 1224 t = netdev_priv(dev);
@@ -1220,8 +1239,8 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1220 if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP && 1239 if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
1221 p.proto != 0) 1240 p.proto != 0)
1222 break; 1241 break;
1223 t = ip6_tnl_locate(&p, cmd == SIOCADDTUNNEL); 1242 t = ip6_tnl_locate(net, &p, cmd == SIOCADDTUNNEL);
1224 if (dev != ip6_fb_tnl_dev && cmd == SIOCCHGTUNNEL) { 1243 if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
1225 if (t != NULL) { 1244 if (t != NULL) {
1226 if (t->dev != dev) { 1245 if (t->dev != dev) {
1227 err = -EEXIST; 1246 err = -EEXIST;
@@ -1230,9 +1249,9 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1230 } else 1249 } else
1231 t = netdev_priv(dev); 1250 t = netdev_priv(dev);
1232 1251
1233 ip6_tnl_unlink(t); 1252 ip6_tnl_unlink(ip6n, t);
1234 err = ip6_tnl_change(t, &p); 1253 err = ip6_tnl_change(t, &p);
1235 ip6_tnl_link(t); 1254 ip6_tnl_link(ip6n, t);
1236 netdev_state_change(dev); 1255 netdev_state_change(dev);
1237 } 1256 }
1238 if (t) { 1257 if (t) {
@@ -1248,15 +1267,15 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1248 if (!capable(CAP_NET_ADMIN)) 1267 if (!capable(CAP_NET_ADMIN))
1249 break; 1268 break;
1250 1269
1251 if (dev == ip6_fb_tnl_dev) { 1270 if (dev == ip6n->fb_tnl_dev) {
1252 err = -EFAULT; 1271 err = -EFAULT;
1253 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) 1272 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
1254 break; 1273 break;
1255 err = -ENOENT; 1274 err = -ENOENT;
1256 if ((t = ip6_tnl_locate(&p, 0)) == NULL) 1275 if ((t = ip6_tnl_locate(net, &p, 0)) == NULL)
1257 break; 1276 break;
1258 err = -EPERM; 1277 err = -EPERM;
1259 if (t->dev == ip6_fb_tnl_dev) 1278 if (t->dev == ip6n->fb_tnl_dev)
1260 break; 1279 break;
1261 dev = t->dev; 1280 dev = t->dev;
1262 } 1281 }
@@ -1324,6 +1343,7 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
1324 dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr); 1343 dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr);
1325 dev->flags |= IFF_NOARP; 1344 dev->flags |= IFF_NOARP;
1326 dev->addr_len = sizeof(struct in6_addr); 1345 dev->addr_len = sizeof(struct in6_addr);
1346 dev->features |= NETIF_F_NETNS_LOCAL;
1327} 1347}
1328 1348
1329 1349
@@ -1365,10 +1385,13 @@ static int
1365ip6_fb_tnl_dev_init(struct net_device *dev) 1385ip6_fb_tnl_dev_init(struct net_device *dev)
1366{ 1386{
1367 struct ip6_tnl *t = netdev_priv(dev); 1387 struct ip6_tnl *t = netdev_priv(dev);
1388 struct net *net = dev_net(dev);
1389 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1390
1368 ip6_tnl_dev_init_gen(dev); 1391 ip6_tnl_dev_init_gen(dev);
1369 t->parms.proto = IPPROTO_IPV6; 1392 t->parms.proto = IPPROTO_IPV6;
1370 dev_hold(dev); 1393 dev_hold(dev);
1371 tnls_wc[0] = t; 1394 ip6n->tnls_wc[0] = t;
1372 return 0; 1395 return 0;
1373} 1396}
1374 1397
@@ -1384,6 +1407,78 @@ static struct xfrm6_tunnel ip6ip6_handler = {
1384 .priority = 1, 1407 .priority = 1,
1385}; 1408};
1386 1409
/*
 * Unregister every tunnel device belonging to one ip6_tnl namespace:
 * all entries in the remote/local hash table, then the fallback device.
 * NOTE(review): caller must hold RTNL (required by unregister_netdevice()) —
 * ip6_tnl_exit_net() below does rtnl_lock() around this call.
 */
static void ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
{
	int h;
	struct ip6_tnl *t;

	/* Drain each hash chain head-first; presumably the device's
	 * uninit path unlinks the tunnel from tnls_r_l[h], otherwise
	 * this would loop forever — TODO confirm against ip6_tnl_unlink. */
	for (h = 0; h < HASH_SIZE; h++) {
		while ((t = ip6n->tnls_r_l[h]) != NULL)
			unregister_netdevice(t->dev);
	}

	/* Finally take down the fallback ("ip6tnl0") tunnel. */
	t = ip6n->tnls_wc[0];
	unregister_netdevice(t->dev);
}
1423
/*
 * Per-namespace init for the ip6_tnl module: allocate the ip6_tnl_net
 * state, attach it to @net via the generic pernet pointer array, and
 * create/register the namespace's fallback device "ip6tnl0".
 * Returns 0 on success or a negative errno (unwinding via gotos).
 */
static int ip6_tnl_init_net(struct net *net)
{
	int err;
	struct ip6_tnl_net *ip6n;

	err = -ENOMEM;
	ip6n = kzalloc(sizeof(struct ip6_tnl_net), GFP_KERNEL);
	if (ip6n == NULL)
		goto err_alloc;

	err = net_assign_generic(net, ip6_tnl_net_id, ip6n);
	if (err < 0)
		goto err_assign;

	/* tnls[0]/tnls[1] alias the wildcard and remote/local tables. */
	ip6n->tnls[0] = ip6n->tnls_wc;
	ip6n->tnls[1] = ip6n->tnls_r_l;

	err = -ENOMEM;
	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
					ip6_tnl_dev_setup);

	if (!ip6n->fb_tnl_dev)
		goto err_alloc_dev;

	ip6n->fb_tnl_dev->init = ip6_fb_tnl_dev_init;
	/* Bind the device to @net before registration so it lands in
	 * the right namespace. */
	dev_net_set(ip6n->fb_tnl_dev, net);

	err = register_netdev(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;
	return 0;

err_register:
	free_netdev(ip6n->fb_tnl_dev);
err_alloc_dev:
	/* nothing */
err_assign:
	kfree(ip6n);
err_alloc:
	return err;
}
1465
/*
 * Per-namespace teardown: unregister every tunnel device (under RTNL,
 * as unregister_netdevice() requires) and free the pernet state.
 */
static void ip6_tnl_exit_net(struct net *net)
{
	struct ip6_tnl_net *ip6n;

	ip6n = net_generic(net, ip6_tnl_net_id);
	rtnl_lock();
	ip6_tnl_destroy_tunnels(ip6n);
	rtnl_unlock();
	kfree(ip6n);
}

/* Hooked into the generic pernet machinery from ip6_tunnel_init(). */
static struct pernet_operations ip6_tnl_net_ops = {
	.init = ip6_tnl_init_net,
	.exit = ip6_tnl_exit_net,
};
1481
1387/** 1482/**
1388 * ip6_tunnel_init - register protocol and reserve needed resources 1483 * ip6_tunnel_init - register protocol and reserve needed resources
1389 * 1484 *
@@ -1405,21 +1500,12 @@ static int __init ip6_tunnel_init(void)
1405 err = -EAGAIN; 1500 err = -EAGAIN;
1406 goto unreg_ip4ip6; 1501 goto unreg_ip4ip6;
1407 } 1502 }
1408 ip6_fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
1409 ip6_tnl_dev_setup);
1410 1503
1411 if (!ip6_fb_tnl_dev) { 1504 err = register_pernet_gen_device(&ip6_tnl_net_id, &ip6_tnl_net_ops);
1412 err = -ENOMEM; 1505 if (err < 0)
1413 goto fail; 1506 goto err_pernet;
1414 }
1415 ip6_fb_tnl_dev->init = ip6_fb_tnl_dev_init;
1416
1417 if ((err = register_netdev(ip6_fb_tnl_dev))) {
1418 free_netdev(ip6_fb_tnl_dev);
1419 goto fail;
1420 }
1421 return 0; 1507 return 0;
1422fail: 1508err_pernet:
1423 xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6); 1509 xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
1424unreg_ip4ip6: 1510unreg_ip4ip6:
1425 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET); 1511 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
@@ -1427,20 +1513,6 @@ out:
1427 return err; 1513 return err;
1428} 1514}
1429 1515
1430static void __exit ip6_tnl_destroy_tunnels(void)
1431{
1432 int h;
1433 struct ip6_tnl *t;
1434
1435 for (h = 0; h < HASH_SIZE; h++) {
1436 while ((t = tnls_r_l[h]) != NULL)
1437 unregister_netdevice(t->dev);
1438 }
1439
1440 t = tnls_wc[0];
1441 unregister_netdevice(t->dev);
1442}
1443
1444/** 1516/**
1445 * ip6_tunnel_cleanup - free resources and unregister protocol 1517 * ip6_tunnel_cleanup - free resources and unregister protocol
1446 **/ 1518 **/
@@ -1453,9 +1525,7 @@ static void __exit ip6_tunnel_cleanup(void)
1453 if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6)) 1525 if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
1454 printk(KERN_INFO "ip6_tunnel close: can't deregister ip6ip6\n"); 1526 printk(KERN_INFO "ip6_tunnel close: can't deregister ip6ip6\n");
1455 1527
1456 rtnl_lock(); 1528 unregister_pernet_gen_device(ip6_tnl_net_id, &ip6_tnl_net_ops);
1457 ip6_tnl_destroy_tunnels();
1458 rtnl_unlock();
1459} 1529}
1460 1530
1461module_init(ip6_tunnel_init); 1531module_init(ip6_tunnel_init);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
new file mode 100644
index 000000000000..c8c6e33d1163
--- /dev/null
+++ b/net/ipv6/ip6mr.c
@@ -0,0 +1,1643 @@
1/*
2 * Linux IPv6 multicast routing support for BSD pim6sd
3 * Based on net/ipv4/ipmr.c.
4 *
5 * (c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
6 * LSIIT Laboratory, Strasbourg, France
7 * (c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
8 * 6WIND, Paris, France
9 * Copyright (C)2007,2008 USAGI/WIDE Project
10 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 */
18
19#include <asm/system.h>
20#include <asm/uaccess.h>
21#include <linux/types.h>
22#include <linux/sched.h>
23#include <linux/errno.h>
24#include <linux/timer.h>
25#include <linux/mm.h>
26#include <linux/kernel.h>
27#include <linux/fcntl.h>
28#include <linux/stat.h>
29#include <linux/socket.h>
30#include <linux/inet.h>
31#include <linux/netdevice.h>
32#include <linux/inetdevice.h>
33#include <linux/proc_fs.h>
34#include <linux/seq_file.h>
35#include <linux/init.h>
36#include <net/protocol.h>
37#include <linux/skbuff.h>
38#include <net/sock.h>
39#include <net/raw.h>
40#include <linux/notifier.h>
41#include <linux/if_arp.h>
42#include <net/checksum.h>
43#include <net/netlink.h>
44
45#include <net/ipv6.h>
46#include <net/ip6_route.h>
47#include <linux/mroute6.h>
48#include <linux/pim.h>
49#include <net/addrconf.h>
50#include <linux/netfilter_ipv6.h>
51
52struct sock *mroute6_socket;
53
54
55/* Big lock, protecting vif table, mrt cache and mroute socket state.
56 Note that the changes are semaphored via rtnl_lock.
57 */
58
59static DEFINE_RWLOCK(mrt_lock);
60
61/*
62 * Multicast router control variables
63 */
64
65static struct mif_device vif6_table[MAXMIFS]; /* Devices */
66static int maxvif;
67
68#define MIF_EXISTS(idx) (vif6_table[idx].dev != NULL)
69
70static int mroute_do_assert; /* Set in PIM assert */
71#ifdef CONFIG_IPV6_PIMSM_V2
72static int mroute_do_pim;
73#else
74#define mroute_do_pim 0
75#endif
76
77static struct mfc6_cache *mfc6_cache_array[MFC6_LINES]; /* Forwarding cache */
78
79static struct mfc6_cache *mfc_unres_queue; /* Queue of unresolved entries */
80static atomic_t cache_resolve_queue_len; /* Size of unresolved */
81
82/* Special spinlock for queue of unresolved entries */
83static DEFINE_SPINLOCK(mfc_unres_lock);
84
85/* We return to original Alan's scheme. Hash table of resolved
86 entries is changed only in process context and protected
87 with weak lock mrt_lock. Queue of unresolved entries is protected
88 with strong spinlock mfc_unres_lock.
89
90 In this case data path is free of exclusive locks at all.
91 */
92
93static struct kmem_cache *mrt_cachep __read_mostly;
94
95static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache);
96static int ip6mr_cache_report(struct sk_buff *pkt, mifi_t mifi, int assert);
97static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm);
98
99#ifdef CONFIG_IPV6_PIMSM_V2
100static struct inet6_protocol pim6_protocol;
101#endif
102
103static struct timer_list ipmr_expire_timer;
104
105
106#ifdef CONFIG_PROC_FS
107
/* seq_file cursor for /proc/net/ip6_mr_cache iteration. */
struct ipmr_mfc_iter {
	struct mfc6_cache **cache;	/* table being walked: mfc6_cache_array, &mfc_unres_queue, or NULL when done */
	int ct;				/* current hash bucket index */
};
112
113
/*
 * Locate the pos'th MFC entry for seq_file iteration: first walk the
 * resolved hash table (under mrt_lock), then the unresolved queue
 * (under mfc_unres_lock).  On success the matching lock is LEFT HELD;
 * ipmr_mfc_seq_stop() releases it based on it->cache.  it->cache and
 * it->ct record where the entry was found so _next can resume there.
 */
static struct mfc6_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mfc6_cache *mfc;

	it->cache = mfc6_cache_array;
	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < ARRAY_SIZE(mfc6_cache_array); it->ct++)
		for (mfc = mfc6_cache_array[it->ct]; mfc; mfc = mfc->next)
			if (pos-- == 0)
				return mfc;	/* mrt_lock still held */
	read_unlock(&mrt_lock);

	it->cache = &mfc_unres_queue;
	spin_lock_bh(&mfc_unres_lock);
	for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
		if (pos-- == 0)
			return mfc;	/* mfc_unres_lock still held */
	spin_unlock_bh(&mfc_unres_lock);

	/* Ran past the end: no lock held, nothing to show. */
	it->cache = NULL;
	return NULL;
}
136
137
138
139
140/*
141 * The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif
142 */
143
/* seq_file cursor for /proc/net/ip6_mr_vif iteration. */
struct ipmr_vif_iter {
	int ct;		/* current index into vif6_table */
};
147
148static struct mif_device *ip6mr_vif_seq_idx(struct ipmr_vif_iter *iter,
149 loff_t pos)
150{
151 for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
152 if (!MIF_EXISTS(iter->ct))
153 continue;
154 if (pos-- == 0)
155 return &vif6_table[iter->ct];
156 }
157 return NULL;
158}
159
/*
 * seq_file "start": take mrt_lock for the whole walk and return either
 * the header token (pos 0) or the (*pos - 1)'th interface entry.
 */
static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	read_lock(&mrt_lock);
	return (*pos ? ip6mr_vif_seq_idx(seq->private, *pos - 1)
		: SEQ_START_TOKEN);
}

/* seq_file "next": advance to the next in-use mif, or NULL at the end. */
static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(iter, 0);

	/* Skip holes left by deleted interfaces. */
	while (++iter->ct < maxvif) {
		if (!MIF_EXISTS(iter->ct))
			continue;
		return &vif6_table[iter->ct];
	}
	return NULL;
}

/* seq_file "stop": drop the lock taken in ip6mr_vif_seq_start(). */
static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

/* Emit one row of /proc/net/ip6_mr_vif: header, then per-mif counters. */
static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld %8ld %7ld %05X\n",
			   vif - vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}
208
209static struct seq_operations ip6mr_vif_seq_ops = {
210 .start = ip6mr_vif_seq_start,
211 .next = ip6mr_vif_seq_next,
212 .stop = ip6mr_vif_seq_stop,
213 .show = ip6mr_vif_seq_show,
214};
215
216static int ip6mr_vif_open(struct inode *inode, struct file *file)
217{
218 return seq_open_private(file, &ip6mr_vif_seq_ops,
219 sizeof(struct ipmr_vif_iter));
220}
221
222static struct file_operations ip6mr_vif_fops = {
223 .owner = THIS_MODULE,
224 .open = ip6mr_vif_open,
225 .read = seq_read,
226 .llseek = seq_lseek,
227 .release = seq_release,
228};
229
/* seq_file "start": header token first, else resume at entry *pos - 1
 * (ipmr_mfc_seq_idx leaves the appropriate lock held). */
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	return (*pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
		: SEQ_START_TOKEN);
}

/*
 * seq_file "next": follow the current hash chain, then later buckets,
 * then hand over from the resolved table (mrt_lock) to the unresolved
 * queue (mfc_unres_lock).  Locks are swapped here so that whatever
 * it->cache points at matches the lock ipmr_mfc_seq_stop() releases.
 */
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(seq->private, 0);

	/* Stay on the current chain as long as possible. */
	if (mfc->next)
		return mfc->next;

	/* End of the unresolved queue: iteration is finished. */
	if (it->cache == &mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != mfc6_cache_array);

	while (++it->ct < ARRAY_SIZE(mfc6_cache_array)) {
		mfc = mfc6_cache_array[it->ct];
		if (mfc)
			return mfc;
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	mfc = mfc_unres_queue;
	if (mfc)
		return mfc;

 end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

/* seq_file "stop": release whichever lock the cursor still holds. */
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;

	if (it->cache == &mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == mfc6_cache_array)
		read_unlock(&mrt_lock);
}
286
287static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
288{
289 int n;
290
291 if (v == SEQ_START_TOKEN) {
292 seq_puts(seq,
293 "Group "
294 "Origin "
295 "Iif Pkts Bytes Wrong Oifs\n");
296 } else {
297 const struct mfc6_cache *mfc = v;
298 const struct ipmr_mfc_iter *it = seq->private;
299
300 seq_printf(seq,
301 NIP6_FMT " " NIP6_FMT " %-3d %8ld %8ld %8ld",
302 NIP6(mfc->mf6c_mcastgrp), NIP6(mfc->mf6c_origin),
303 mfc->mf6c_parent,
304 mfc->mfc_un.res.pkt,
305 mfc->mfc_un.res.bytes,
306 mfc->mfc_un.res.wrong_if);
307
308 if (it->cache != &mfc_unres_queue) {
309 for (n = mfc->mfc_un.res.minvif;
310 n < mfc->mfc_un.res.maxvif; n++) {
311 if (MIF_EXISTS(n) &&
312 mfc->mfc_un.res.ttls[n] < 255)
313 seq_printf(seq,
314 " %2d:%-3d",
315 n, mfc->mfc_un.res.ttls[n]);
316 }
317 }
318 seq_putc(seq, '\n');
319 }
320 return 0;
321}
322
323static struct seq_operations ipmr_mfc_seq_ops = {
324 .start = ipmr_mfc_seq_start,
325 .next = ipmr_mfc_seq_next,
326 .stop = ipmr_mfc_seq_stop,
327 .show = ipmr_mfc_seq_show,
328};
329
330static int ipmr_mfc_open(struct inode *inode, struct file *file)
331{
332 return seq_open_private(file, &ipmr_mfc_seq_ops,
333 sizeof(struct ipmr_mfc_iter));
334}
335
336static struct file_operations ip6mr_mfc_fops = {
337 .owner = THIS_MODULE,
338 .open = ipmr_mfc_open,
339 .read = seq_read,
340 .llseek = seq_lseek,
341 .release = seq_release,
342};
343#endif
344
345#ifdef CONFIG_IPV6_PIMSM_V2
346static int reg_vif_num = -1;
347
348static int pim6_rcv(struct sk_buff *skb)
349{
350 struct pimreghdr *pim;
351 struct ipv6hdr *encap;
352 struct net_device *reg_dev = NULL;
353
354 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
355 goto drop;
356
357 pim = (struct pimreghdr *)skb_transport_header(skb);
358 if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
359 (pim->flags & PIM_NULL_REGISTER) ||
360 (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
361 (u16)csum_fold(skb_checksum(skb, 0, skb->len, 0))))
362 goto drop;
363
364 /* check if the inner packet is destined to mcast group */
365 encap = (struct ipv6hdr *)(skb_transport_header(skb) +
366 sizeof(*pim));
367
368 if (!ipv6_addr_is_multicast(&encap->daddr) ||
369 encap->payload_len == 0 ||
370 ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
371 goto drop;
372
373 read_lock(&mrt_lock);
374 if (reg_vif_num >= 0)
375 reg_dev = vif6_table[reg_vif_num].dev;
376 if (reg_dev)
377 dev_hold(reg_dev);
378 read_unlock(&mrt_lock);
379
380 if (reg_dev == NULL)
381 goto drop;
382
383 skb->mac_header = skb->network_header;
384 skb_pull(skb, (u8 *)encap - skb->data);
385 skb_reset_network_header(skb);
386 skb->dev = reg_dev;
387 skb->protocol = htons(ETH_P_IP);
388 skb->ip_summed = 0;
389 skb->pkt_type = PACKET_HOST;
390 dst_release(skb->dst);
391 ((struct net_device_stats *)netdev_priv(reg_dev))->rx_bytes += skb->len;
392 ((struct net_device_stats *)netdev_priv(reg_dev))->rx_packets++;
393 skb->dst = NULL;
394 nf_reset(skb);
395 netif_rx(skb);
396 dev_put(reg_dev);
397 return 0;
398 drop:
399 kfree_skb(skb);
400 return 0;
401}
402
/* IPv6 protocol handler for IPPROTO_PIM, delivering to pim6_rcv(). */
static struct inet6_protocol pim6_protocol = {
	.handler = pim6_rcv,
};

/* Service routines creating virtual interfaces: PIMREG */

/*
 * Transmit on the pim6reg device: nothing is sent on the wire; the
 * whole packet is reported to the user-space daemon (MRT6MSG_WHOLEPKT)
 * and then freed.  mrt_lock is held to read reg_vif_num consistently.
 */
static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	read_lock(&mrt_lock);
	((struct net_device_stats *)netdev_priv(dev))->tx_bytes += skb->len;
	((struct net_device_stats *)netdev_priv(dev))->tx_packets++;
	ip6mr_cache_report(skb, reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return 0;
}

/* The device's private area holds nothing but its stats block
 * (see the alloc_netdev() size in ip6mr_reg_vif()). */
static struct net_device_stats *reg_vif_get_stats(struct net_device *dev)
{
	return (struct net_device_stats *)netdev_priv(dev);
}

/* alloc_netdev() setup callback for the "pim6reg" pseudo-device. */
static void reg_vif_setup(struct net_device *dev)
{
	dev->type = ARPHRD_PIMREG;
	/* NOTE(review): MTU leaves room for an IPv6 header plus 8 bytes,
	 * presumably the PIM register header — TODO confirm. */
	dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags = IFF_NOARP;
	dev->hard_start_xmit = reg_vif_xmit;
	dev->get_stats = reg_vif_get_stats;
	dev->destructor = free_netdev;
}
434
/*
 * Create, register and bring up the "pim6reg" register device.
 * Called with RTNL held (mif6_add path).  On dev_open() failure the
 * RTNL lock is bounced (released and re-taken) so the pending
 * registration completes before the device is unregistered again.
 * Returns the device, or NULL on any failure.
 */
static struct net_device *ip6mr_reg_vif(void)
{
	struct net_device *dev;

	/* Private area sized to hold only the stats block; see
	 * reg_vif_get_stats(). */
	dev = alloc_netdev(sizeof(struct net_device_stats), "pim6reg",
			   reg_vif_setup);

	if (dev == NULL)
		return NULL;

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	if (dev_open(dev))
		goto failure;

	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
464#endif
465
466/*
467 * Delete a VIF entry
468 */
469
/*
 * Delete a VIF entry
 *
 * Remove mif @vifi from vif6_table: detach the device under mrt_lock,
 * shrink maxvif if the top slot was freed, then drop allmulti and the
 * device reference outside the lock.  Returns 0 or -EADDRNOTAVAIL.
 */
static int mif6_delete(int vifi)
{
	struct mif_device *v;
	struct net_device *dev;
	if (vifi < 0 || vifi >= maxvif)
		return -EADDRNOTAVAIL;

	v = &vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	/* The PIM register vif is gone; forget its index. */
	if (vifi == reg_vif_num)
		reg_vif_num = -1;
#endif

	/* Freed the top slot: scan down for the new highest in-use mif. */
	if (vifi + 1 == maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(tmp))
				break;
		}
		maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	/* The pim6reg pseudo-device exists only while its vif does. */
	if (v->flags & MIFF_REGISTER)
		unregister_netdevice(dev);

	dev_put(dev);
	return 0;
}
512
513/* Destroy an unresolved cache entry, killing queued skbs
514 and reporting error to netlink readers.
515 */
516
/*
 * Destroy an unresolved cache entry that timed out: kill its queued
 * skbs (answering queued netlink route requests with -ETIMEDOUT) and
 * free the entry.  Caller holds mfc_unres_lock and has already
 * unlinked @c from mfc_unres_queue.
 */
static void ip6mr_destroy_unres(struct mfc6_cache *c)
{
	struct sk_buff *skb;

	atomic_dec(&cache_resolve_queue_len);

	while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		/* version == 0 marks a queued netlink request, not a
		 * real IPv6 packet. */
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
		} else
			kfree_skb(skb);
	}

	kmem_cache_free(mrt_cachep, c);
}


/* Single timer process for all the unresolved queue. */

/*
 * Walk the unresolved queue (caller holds mfc_unres_lock), destroy
 * expired entries, and re-arm the timer for the earliest remaining
 * expiry (capped at 10 * HZ).
 */
static void ipmr_do_expire_process(unsigned long dummy)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, **cp;

	cp = &mfc_unres_queue;

	while ((c = *cp) != NULL) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			cp = &c->next;
			continue;
		}

		/* Expired: unlink and destroy. */
		*cp = c->next;
		ip6mr_destroy_unres(c);
	}

	if (atomic_read(&cache_resolve_queue_len))
		mod_timer(&ipmr_expire_timer, jiffies + expires);
}

/*
 * Timer callback: try the lock; if contended, retry one jiffy later
 * rather than spinning in timer (softirq) context.
 */
static void ipmr_expire_process(unsigned long dummy)
{
	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (atomic_read(&cache_resolve_queue_len))
		ipmr_do_expire_process(dummy);

	spin_unlock(&mfc_unres_lock);
}
579
580/* Fill oifs list. It is called under write locked mrt_lock. */
581
582static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttls)
583{
584 int vifi;
585
586 cache->mfc_un.res.minvif = MAXMIFS;
587 cache->mfc_un.res.maxvif = 0;
588 memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
589
590 for (vifi = 0; vifi < maxvif; vifi++) {
591 if (MIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
592 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
593 if (cache->mfc_un.res.minvif > vifi)
594 cache->mfc_un.res.minvif = vifi;
595 if (cache->mfc_un.res.maxvif <= vifi)
596 cache->mfc_un.res.maxvif = vifi + 1;
597 }
598 }
599}
600
/*
 * Add mif @vifc->mif6c_mifi to vif6_table.  For MIFF_REGISTER a
 * pim6reg pseudo-device is created; otherwise the mif is bound to an
 * existing interface by ifindex.  @mrtsock marks entries created via
 * the routing daemon's socket (non-static).  Returns 0 or a negative
 * errno.
 */
static int mif6_add(struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &vif6_table[vifi];
	struct net_device *dev;

	/* Is vif busy ? */
	if (MIF_EXISTS(vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif();
		if (!dev)
			return -ENOBUFS;
		break;
#endif
	case 0:
		dev = dev_get_by_index(&init_net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		/* NOTE(review): the lookup reference is dropped right
		 * away; the lasting dev_hold() happens below under
		 * mrt_lock — confirm nothing can free dev in between. */
		dev_put(dev);
		break;
	default:
		return -EINVAL;
	}

	dev_set_allmulti(dev, 1);

	/*
	 * Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	dev_hold(dev);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		reg_vif_num = vifi;
#endif
	if (vifi + 1 > maxvif)
		maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
666
667static struct mfc6_cache *ip6mr_cache_find(struct in6_addr *origin, struct in6_addr *mcastgrp)
668{
669 int line = MFC6_HASH(mcastgrp, origin);
670 struct mfc6_cache *c;
671
672 for (c = mfc6_cache_array[line]; c; c = c->next) {
673 if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
674 ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
675 break;
676 }
677 return c;
678}
679
680/*
681 * Allocate a multicast cache entry
682 */
683static struct mfc6_cache *ip6mr_cache_alloc(void)
684{
685 struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_KERNEL);
686 if (c == NULL)
687 return NULL;
688 memset(c, 0, sizeof(*c));
689 c->mfc_un.res.minvif = MAXMIFS;
690 return c;
691}
692
693static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
694{
695 struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_ATOMIC);
696 if (c == NULL)
697 return NULL;
698 memset(c, 0, sizeof(*c));
699 skb_queue_head_init(&c->mfc_un.unres.unresolved);
700 c->mfc_un.unres.expires = jiffies + 10 * HZ;
701 return c;
702}
703
704/*
705 * A cache entry has gone into a resolved state from queued
706 */
707
/*
 * Replay everything queued on the unresolved entry @uc through the
 * freshly resolved entry @c: forward real packets, and answer queued
 * netlink route requests (marked by version == 0).
 */
static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 * Play the pending entries through our router
	 */

	while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			int err;
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));

			if (ip6mr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				/* Route did not fit: report -EMSGSIZE. */
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
			}
			err = rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
		} else
			ip6_mr_forward(skb, c);
	}
}
734
735/*
736 * Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
737 * expects the following bizarre scheme.
738 *
739 * Called under mrt_lock.
740 */
741
/*
 * Report a multicast event to the user-space daemon on mroute6_socket.
 * For MRT6MSG_WHOLEPKT (PIM register path) the whole packet is copied
 * behind an mrt6msg header; otherwise only the IPv6 header is copied.
 * Called under mrt_lock.  Returns 0 or a negative errno.
 */
static int ip6mr_cache_report(struct sk_buff *pkt, mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
					   +sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = reg_vif_num;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
		/*
		 * Copy the IP header
		 */

		skb_put(skb, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb);
		skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

		/*
		 * Add our header
		 */
		skb_put(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);

		msg->im6_mbz = 0;
		msg->im6_msgtype = assert;
		msg->im6_mif = mifi;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->dst = dst_clone(pkt->dst);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Leave skb->data pointing at the mrt6msg header. */
		skb_pull(skb, sizeof(struct ipv6hdr));
	}

	/* No daemon listening: nobody to report to. */
	if (mroute6_socket == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 * Deliver to user space multicast routing algorithms
	 */
	if ((ret = sock_queue_rcv_skb(mroute6_socket, skb)) < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}
831
832/*
833 * Queue a packet for resolution. It gets locked cache entry!
834 */
835
/*
 * No route yet for this (src, grp): park the skb on the matching
 * unresolved-queue entry, creating one (and pinging pim6sd with
 * MRT6MSG_NOCACHE) if needed.  At most 10 unresolved entries and 4
 * queued skbs per entry are kept.  Consumes the skb on error paths.
 */
static int
ip6mr_cache_unresolved(mifi_t mifi, struct sk_buff *skb)
{
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	for (c = mfc_unres_queue; c; c = c->next) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr))
			break;
	}

	if (c == NULL) {
		/*
		 * Create a new entry if allowable
		 */

		if (atomic_read(&cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 * Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 * Reflect first query at pim6sd
		 */
		if ((err = ip6mr_cache_report(skb, mifi, MRT6MSG_NOCACHE)) < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			kmem_cache_free(mrt_cachep, c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&cache_resolve_queue_len);
		c->next = mfc_unres_queue;
		mfc_unres_queue = c;

		/* (Re)arm the expiry timer for the new entry. */
		ipmr_do_expire_process(1);
	}

	/*
	 * See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
904
905/*
906 * MFC6 cache manipulation by user space
907 */
908
/*
 * Remove one resolved (origin, group) route from the hash table.
 * Returns 0, or -ENOENT if no such route exists.
 */
static int ip6mr_mfc_delete(struct mf6cctl *mfc)
{
	int line;
	struct mfc6_cache *c, **cp;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	for (cp = &mfc6_cache_array[line]; (c = *cp) != NULL; cp = &c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
			/* Unlink under the writer lock, free outside it. */
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
			return 0;
		}
	}
	return -ENOENT;
}
929
930static int ip6mr_device_event(struct notifier_block *this,
931 unsigned long event, void *ptr)
932{
933 struct net_device *dev = ptr;
934 struct mif_device *v;
935 int ct;
936
937 if (dev_net(dev) != &init_net)
938 return NOTIFY_DONE;
939
940 if (event != NETDEV_UNREGISTER)
941 return NOTIFY_DONE;
942
943 v = &vif6_table[0];
944 for (ct = 0; ct < maxvif; ct++, v++) {
945 if (v->dev == dev)
946 mif6_delete(ct);
947 }
948 return NOTIFY_DONE;
949}
950
/* Tears down mifs when their underlying device unregisters. */
static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

/*
 * Setup for IP multicast routing
 */

/*
 * Boot-time init: create the mfc6_cache slab, set up (but don't arm)
 * the unresolved-entry expiry timer, hook the netdevice notifier and
 * create the /proc/net seq files.  Panics if the slab cannot be made.
 */
void __init ip6_mr_init(void)
{
	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		panic("cannot allocate ip6_mrt_cache");

	setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
	register_netdevice_notifier(&ip6_mr_notifier);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create(&init_net, "ip6_mr_vif", 0, &ip6mr_vif_fops);
	proc_net_fops_create(&init_net, "ip6_mr_cache", 0, &ip6mr_mfc_fops);
#endif
}
975
976
/*
 * Add or update a resolved (origin, group) route.  If the entry exists
 * its parent/thresholds are updated in place; otherwise a new one is
 * inserted and any matching unresolved entry is drained through it.
 * @mrtsock marks daemon-created (non-static) entries.  Returns 0 or a
 * negative errno.
 */
static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock)
{
	int line;
	struct mfc6_cache *uc, *c, **cp;
	unsigned char ttls[MAXMIFS];
	int i;

	/* Member interfaces forward with TTL 1; all others stay 255. */
	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;

	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	for (cp = &mfc6_cache_array[line]; (c = *cp) != NULL; cp = &c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr))
			break;
	}

	if (c != NULL) {
		/* Existing route: update it under the writer lock. */
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (c == NULL)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	c->next = mfc6_cache_array[line];
	mfc6_cache_array[line] = c;
	write_unlock_bh(&mrt_lock);

	/*
	 * Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	spin_lock_bh(&mfc_unres_lock);
	for (cp = &mfc_unres_queue; (uc = *cp) != NULL;
	     cp = &uc->next) {
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			*cp = uc->next;
			/* Queue empty: the expiry timer has no work left. */
			if (atomic_dec_and_test(&cache_resolve_queue_len))
				del_timer(&ipmr_expire_timer);
			break;
		}
	}
	spin_unlock_bh(&mfc_unres_lock);

	/* uc is non-NULL only if the loop above broke on a match. */
	if (uc) {
		ip6mr_cache_resolve(uc, c);
		kmem_cache_free(mrt_cachep, uc);
	}
	return 0;
}
1051
/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct sock *sk)
{
	int i;

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < maxvif; i++) {
		/* VIFF_STATIC entries were installed independently of the
		 * mrouted socket and are kept across its teardown. */
		if (!(vif6_table[i].flags & VIFF_STATIC))
			mif6_delete(i);
	}

	/*
	 *	Wipe the cache
	 */
	for (i = 0; i < ARRAY_SIZE(mfc6_cache_array); i++) {
		struct mfc6_cache *c, **cp;

		cp = &mfc6_cache_array[i];
		while ((c = *cp) != NULL) {
			if (c->mfc_flags & MFC_STATIC) {
				cp = &c->next;
				continue;
			}
			/* Unlink under the writer lock; free outside it. */
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
		}
	}

	/* Destroy any still-unresolved entries and their queued skbs.
	 * The spinlock is dropped around ip6mr_destroy_unres() for each
	 * entry, so the queue head is re-read on every iteration. */
	if (atomic_read(&cache_resolve_queue_len) != 0) {
		struct mfc6_cache *c;

		spin_lock_bh(&mfc_unres_lock);
		while (mfc_unres_queue != NULL) {
			c = mfc_unres_queue;
			mfc_unres_queue = c->next;
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_destroy_unres(c);

			spin_lock_bh(&mfc_unres_lock);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}
1104
1105static int ip6mr_sk_init(struct sock *sk)
1106{
1107 int err = 0;
1108
1109 rtnl_lock();
1110 write_lock_bh(&mrt_lock);
1111 if (likely(mroute6_socket == NULL))
1112 mroute6_socket = sk;
1113 else
1114 err = -EADDRINUSE;
1115 write_unlock_bh(&mrt_lock);
1116
1117 rtnl_unlock();
1118
1119 return err;
1120}
1121
/*
 * Release the multicast-routing control socket.  Only the socket that
 * performed MRT6_INIT may tear the state down; non-static mifs and
 * cache entries it installed are cleaned up via mroute_clean_tables().
 * Returns -EACCES if @sk is not the mrouted socket.
 */
int ip6mr_sk_done(struct sock *sk)
{
	int err = 0;

	rtnl_lock();
	if (sk == mroute6_socket) {
		/* Clear the socket pointer first (under the writer lock),
		 * then clean the tables. */
		write_lock_bh(&mrt_lock);
		mroute6_socket = NULL;
		write_unlock_bh(&mrt_lock);

		mroute_clean_tables(sk);
	} else
		err = -EACCES;
	rtnl_unlock();

	return err;
}
1139
1140/*
1141 * Socket options and virtual interface manipulation. The whole
1142 * virtual interface system is a complete heap, but unfortunately
1143 * that's how BSD mrouted happens to think. Maybe one day with a proper
1144 * MOSPF/PIM router set up we can clean this up.
1145 */
1146
1147int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
1148{
1149 int ret;
1150 struct mif6ctl vif;
1151 struct mf6cctl mfc;
1152 mifi_t mifi;
1153
1154 if (optname != MRT6_INIT) {
1155 if (sk != mroute6_socket && !capable(CAP_NET_ADMIN))
1156 return -EACCES;
1157 }
1158
1159 switch (optname) {
1160 case MRT6_INIT:
1161 if (sk->sk_type != SOCK_RAW ||
1162 inet_sk(sk)->num != IPPROTO_ICMPV6)
1163 return -EOPNOTSUPP;
1164 if (optlen < sizeof(int))
1165 return -EINVAL;
1166
1167 return ip6mr_sk_init(sk);
1168
1169 case MRT6_DONE:
1170 return ip6mr_sk_done(sk);
1171
1172 case MRT6_ADD_MIF:
1173 if (optlen < sizeof(vif))
1174 return -EINVAL;
1175 if (copy_from_user(&vif, optval, sizeof(vif)))
1176 return -EFAULT;
1177 if (vif.mif6c_mifi >= MAXMIFS)
1178 return -ENFILE;
1179 rtnl_lock();
1180 ret = mif6_add(&vif, sk == mroute6_socket);
1181 rtnl_unlock();
1182 return ret;
1183
1184 case MRT6_DEL_MIF:
1185 if (optlen < sizeof(mifi_t))
1186 return -EINVAL;
1187 if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1188 return -EFAULT;
1189 rtnl_lock();
1190 ret = mif6_delete(mifi);
1191 rtnl_unlock();
1192 return ret;
1193
1194 /*
1195 * Manipulate the forwarding caches. These live
1196 * in a sort of kernel/user symbiosis.
1197 */
1198 case MRT6_ADD_MFC:
1199 case MRT6_DEL_MFC:
1200 if (optlen < sizeof(mfc))
1201 return -EINVAL;
1202 if (copy_from_user(&mfc, optval, sizeof(mfc)))
1203 return -EFAULT;
1204 rtnl_lock();
1205 if (optname == MRT6_DEL_MFC)
1206 ret = ip6mr_mfc_delete(&mfc);
1207 else
1208 ret = ip6mr_mfc_add(&mfc, sk == mroute6_socket);
1209 rtnl_unlock();
1210 return ret;
1211
1212 /*
1213 * Control PIM assert (to activate pim will activate assert)
1214 */
1215 case MRT6_ASSERT:
1216 {
1217 int v;
1218 if (get_user(v, (int __user *)optval))
1219 return -EFAULT;
1220 mroute_do_assert = !!v;
1221 return 0;
1222 }
1223
1224#ifdef CONFIG_IPV6_PIMSM_V2
1225 case MRT6_PIM:
1226 {
1227 int v;
1228 if (get_user(v, (int __user *)optval))
1229 return -EFAULT;
1230 v = !!v;
1231 rtnl_lock();
1232 ret = 0;
1233 if (v != mroute_do_pim) {
1234 mroute_do_pim = v;
1235 mroute_do_assert = v;
1236 if (mroute_do_pim)
1237 ret = inet6_add_protocol(&pim6_protocol,
1238 IPPROTO_PIM);
1239 else
1240 ret = inet6_del_protocol(&pim6_protocol,
1241 IPPROTO_PIM);
1242 if (ret < 0)
1243 ret = -EAGAIN;
1244 }
1245 rtnl_unlock();
1246 return ret;
1247 }
1248
1249#endif
1250 /*
1251 * Spurious command, or MRT_VERSION which you cannot
1252 * set.
1253 */
1254 default:
1255 return -ENOPROTOOPT;
1256 }
1257}
1258
/*
 *	Getsock opt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;

	switch (optname) {
	case MRT6_VERSION:
		/* Version number of the mroute6 ABI implemented here. */
		val = 0x0305;
		break;
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	/* Truncate the copy to sizeof(int); reject a negative length
	 * (min_t keeps a negative olr negative, caught just below). */
	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
1298
/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		/* Per-mif packet/byte counters. */
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &vif6_table[vr.mifi];
		if (MIF_EXISTS(vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			/* Drop the lock before touching user memory. */
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		/* Per (source, group) forwarding statistics. */
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(&sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
1353
1354
/* Final step of the NF_INET_FORWARD hook for multicast forwarding:
 * bump the out-forward counter and hand the skb to its dst's output. */
static inline int ip6mr_forward2_finish(struct sk_buff *skb)
{
	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	return dst_output(skb);
}
1360
/*
 *	Processing handlers for ip6mr_forward
 */

/*
 * Transmit one copy of a multicast packet out of mif @vifi.
 * Consumes @skb in all cases (forwarded, bounced to the PIM daemon,
 * or freed on error).  Always returns 0.
 */
static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct mif_device *vif = &vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi fl;

	/* The mif may have been deleted under us (dev set to NULL). */
	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		/* Register vif: no real transmit — account the packet and
		 * report the whole thing to the PIM daemon instead. */
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		((struct net_device_stats *)netdev_priv(vif->dev))->tx_bytes += skb->len;
		((struct net_device_stats *)netdev_priv(vif->dev))->tx_packets++;
		ip6mr_cache_report(skb, vifi, MRT6MSG_WHOLEPKT);
		kfree_skb(skb);
		return 0;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	/* Look up a route for the copy out of the target mif's link. */
	fl = (struct flowi) {
		.oif = vif->link,
		.nl_u = { .ip6_u =
			  { .daddr = ipv6h->daddr, }
		}
	};

	dst = ip6_route_output(&init_net, NULL, &fl);
	if (!dst)
		goto out_free;

	/* Swap the skb's dst for the freshly looked-up route. */
	dst_release(skb->dst);
	skb->dst = dst;

	/*
	 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear, if mrouter runs a multicasting
	 * program, it should receive packets not depending to what interface
	 * program is joined.
	 * If we will not make it, the program will have to join on all
	 * interfaces. On the other hand, multihoming host (or router, but
	 * not mrouter) cannot join to more than one interface - it will
	 * result in receiving multiple packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	/* Re-read the header pointer: skb_cow may have reallocated. */
	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}
1437
1438static int ip6mr_find_vif(struct net_device *dev)
1439{
1440 int ct;
1441 for (ct = maxvif - 1; ct >= 0; ct--) {
1442 if (vif6_table[ct].dev == dev)
1443 break;
1444 }
1445 return ct;
1446}
1447
/*
 * Forward a packet along every eligible output mif of a resolved
 * cache entry.  Consumes @skb in all cases.  Runs under mrt_lock
 * (read), taken by the caller (see ip6_mr_input).
 */
static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache)
{
	int psend = -1;
	int vif, ct;

	vif = cache->mf6c_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (vif6_table[vif].dev != skb->dev) {
		int true_vifi;

		cache->mfc_un.res.wrong_if++;
		true_vifi = ip6mr_find_vif(skb->dev);

		if (true_vifi >= 0 && mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    /* Rate-limit WRONGMIF reports to the daemon. */
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

	vif6_table[vif].pkt_in++;
	vif6_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
		if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
			/* Clone for every mif except the last eligible one,
			 * which consumes the original skb below. */
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(skb2, cache, psend);
			}
			psend = ct;
		}
	}
	if (psend != -1) {
		ip6mr_forward2(skb, cache, psend);
		return 0;
	}

dont_forward:
	kfree_skb(skb);
	return 0;
}
1506
1507
/*
 *	Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		/* If the packet arrived on a known mif, queue it as an
		 * unresolved entry so the routing daemon gets asked. */
		vif = ip6mr_find_vif(skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}
1543
1544
1545static int
1546ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
1547{
1548 int ct;
1549 struct rtnexthop *nhp;
1550 struct net_device *dev = vif6_table[c->mf6c_parent].dev;
1551 u8 *b = skb_tail_pointer(skb);
1552 struct rtattr *mp_head;
1553
1554 if (dev)
1555 RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);
1556
1557 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
1558
1559 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
1560 if (c->mfc_un.res.ttls[ct] < 255) {
1561 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1562 goto rtattr_failure;
1563 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
1564 nhp->rtnh_flags = 0;
1565 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
1566 nhp->rtnh_ifindex = vif6_table[ct].dev->ifindex;
1567 nhp->rtnh_len = sizeof(*nhp);
1568 }
1569 }
1570 mp_head->rta_type = RTA_MULTIPATH;
1571 mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
1572 rtm->rtm_type = RTN_MULTICAST;
1573 return 1;
1574
1575rtattr_failure:
1576 nlmsg_trim(skb, b);
1577 return -EMSGSIZE;
1578}
1579
/*
 * rtnetlink helper: describe the multicast route for the (src, dst)
 * pair of @skb's rt6_info via ip6mr_fill_mroute().  If no cache entry
 * exists yet and @nowait is clear, a minimal IPv6 header is queued as
 * an unresolved entry so the routing daemon is asked to resolve it;
 * with @nowait set, -EAGAIN is returned instead.
 */
int ip6mr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	int err;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb->dst;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(&rt->rt6i_src.addr, &rt->rt6i_dst.addr);

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ip6mr_find_vif(dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		/* Build a stub IPv6 header carrying only the addresses —
		 * presumably all ip6mr_cache_unresolved() needs; every
		 * other field is zeroed / set to "none". */
		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
		ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);

		err = ip6mr_cache_unresolved(vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;

	err = ip6mr_fill_mroute(skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}
1643
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index bf2a686aa13d..06de9d0e1f6b 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -16,7 +16,6 @@
16 * 16 *
17 * FIXME: Make the setsockopt code POSIX compliant: That is 17 * FIXME: Make the setsockopt code POSIX compliant: That is
18 * 18 *
19 * o Return -EINVAL for setsockopt of short lengths
20 * o Truncate getsockopt returns 19 * o Truncate getsockopt returns
21 * o Return an optlen of the truncated length if need be 20 * o Return an optlen of the truncated length if need be
22 * 21 *
@@ -33,6 +32,7 @@
33#include <linux/sockios.h> 32#include <linux/sockios.h>
34#include <linux/net.h> 33#include <linux/net.h>
35#include <linux/in6.h> 34#include <linux/in6.h>
35#include <linux/mroute6.h>
36#include <linux/netdevice.h> 36#include <linux/netdevice.h>
37#include <linux/if_arp.h> 37#include <linux/if_arp.h>
38#include <linux/init.h> 38#include <linux/init.h>
@@ -57,118 +57,6 @@
57 57
58DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly; 58DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly;
59 59
60static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
61 int proto)
62{
63 struct inet6_protocol *ops = NULL;
64
65 for (;;) {
66 struct ipv6_opt_hdr *opth;
67 int len;
68
69 if (proto != NEXTHDR_HOP) {
70 ops = rcu_dereference(inet6_protos[proto]);
71
72 if (unlikely(!ops))
73 break;
74
75 if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
76 break;
77 }
78
79 if (unlikely(!pskb_may_pull(skb, 8)))
80 break;
81
82 opth = (void *)skb->data;
83 len = opth->hdrlen * 8 + 8;
84
85 if (unlikely(!pskb_may_pull(skb, len)))
86 break;
87
88 proto = opth->nexthdr;
89 __skb_pull(skb, len);
90 }
91
92 return ops;
93}
94
95static int ipv6_gso_send_check(struct sk_buff *skb)
96{
97 struct ipv6hdr *ipv6h;
98 struct inet6_protocol *ops;
99 int err = -EINVAL;
100
101 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
102 goto out;
103
104 ipv6h = ipv6_hdr(skb);
105 __skb_pull(skb, sizeof(*ipv6h));
106 err = -EPROTONOSUPPORT;
107
108 rcu_read_lock();
109 ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
110 if (likely(ops && ops->gso_send_check)) {
111 skb_reset_transport_header(skb);
112 err = ops->gso_send_check(skb);
113 }
114 rcu_read_unlock();
115
116out:
117 return err;
118}
119
120static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
121{
122 struct sk_buff *segs = ERR_PTR(-EINVAL);
123 struct ipv6hdr *ipv6h;
124 struct inet6_protocol *ops;
125
126 if (!(features & NETIF_F_V6_CSUM))
127 features &= ~NETIF_F_SG;
128
129 if (unlikely(skb_shinfo(skb)->gso_type &
130 ~(SKB_GSO_UDP |
131 SKB_GSO_DODGY |
132 SKB_GSO_TCP_ECN |
133 SKB_GSO_TCPV6 |
134 0)))
135 goto out;
136
137 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
138 goto out;
139
140 ipv6h = ipv6_hdr(skb);
141 __skb_pull(skb, sizeof(*ipv6h));
142 segs = ERR_PTR(-EPROTONOSUPPORT);
143
144 rcu_read_lock();
145 ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
146 if (likely(ops && ops->gso_segment)) {
147 skb_reset_transport_header(skb);
148 segs = ops->gso_segment(skb, features);
149 }
150 rcu_read_unlock();
151
152 if (unlikely(IS_ERR(segs)))
153 goto out;
154
155 for (skb = segs; skb; skb = skb->next) {
156 ipv6h = ipv6_hdr(skb);
157 ipv6h->payload_len = htons(skb->len - skb->mac_len -
158 sizeof(*ipv6h));
159 }
160
161out:
162 return segs;
163}
164
165static struct packet_type ipv6_packet_type = {
166 .type = __constant_htons(ETH_P_IPV6),
167 .func = ipv6_rcv,
168 .gso_send_check = ipv6_gso_send_check,
169 .gso_segment = ipv6_gso_segment,
170};
171
172struct ip6_ra_chain *ip6_ra_chain; 60struct ip6_ra_chain *ip6_ra_chain;
173DEFINE_RWLOCK(ip6_ra_lock); 61DEFINE_RWLOCK(ip6_ra_lock);
174 62
@@ -215,25 +103,59 @@ int ip6_ra_control(struct sock *sk, int sel, void (*destructor)(struct sock *))
215 return 0; 103 return 0;
216} 104}
217 105
106static
107struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
108 struct ipv6_txoptions *opt)
109{
110 if (inet_sk(sk)->is_icsk) {
111 if (opt &&
112 !((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
113 inet_sk(sk)->daddr != LOOPBACK4_IPV6) {
114 struct inet_connection_sock *icsk = inet_csk(sk);
115 icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
116 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
117 }
118 opt = xchg(&inet6_sk(sk)->opt, opt);
119 } else {
120 write_lock(&sk->sk_dst_lock);
121 opt = xchg(&inet6_sk(sk)->opt, opt);
122 write_unlock(&sk->sk_dst_lock);
123 }
124 sk_dst_reset(sk);
125
126 return opt;
127}
128
218static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, 129static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
219 char __user *optval, int optlen) 130 char __user *optval, int optlen)
220{ 131{
221 struct ipv6_pinfo *np = inet6_sk(sk); 132 struct ipv6_pinfo *np = inet6_sk(sk);
133 struct net *net = sock_net(sk);
222 int val, valbool; 134 int val, valbool;
223 int retv = -ENOPROTOOPT; 135 int retv = -ENOPROTOOPT;
224 136
225 if (optval == NULL) 137 if (optval == NULL)
226 val=0; 138 val=0;
227 else if (get_user(val, (int __user *) optval)) 139 else {
228 return -EFAULT; 140 if (optlen >= sizeof(int)) {
141 if (get_user(val, (int __user *) optval))
142 return -EFAULT;
143 } else
144 val = 0;
145 }
229 146
230 valbool = (val!=0); 147 valbool = (val!=0);
231 148
149 if (ip6_mroute_opt(optname))
150 return ip6_mroute_setsockopt(sk, optname, optval, optlen);
151
232 lock_sock(sk); 152 lock_sock(sk);
233 153
234 switch (optname) { 154 switch (optname) {
235 155
236 case IPV6_ADDRFORM: 156 case IPV6_ADDRFORM:
157 if (optlen < sizeof(int))
158 goto e_inval;
237 if (val == PF_INET) { 159 if (val == PF_INET) {
238 struct ipv6_txoptions *opt; 160 struct ipv6_txoptions *opt;
239 struct sk_buff *pktopt; 161 struct sk_buff *pktopt;
@@ -266,10 +188,9 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
266 188
267 if (sk->sk_protocol == IPPROTO_TCP) { 189 if (sk->sk_protocol == IPPROTO_TCP) {
268 struct inet_connection_sock *icsk = inet_csk(sk); 190 struct inet_connection_sock *icsk = inet_csk(sk);
269
270 local_bh_disable(); 191 local_bh_disable();
271 sock_prot_inuse_add(sk->sk_prot, -1); 192 sock_prot_inuse_add(net, sk->sk_prot, -1);
272 sock_prot_inuse_add(&tcp_prot, 1); 193 sock_prot_inuse_add(net, &tcp_prot, 1);
273 local_bh_enable(); 194 local_bh_enable();
274 sk->sk_prot = &tcp_prot; 195 sk->sk_prot = &tcp_prot;
275 icsk->icsk_af_ops = &ipv4_specific; 196 icsk->icsk_af_ops = &ipv4_specific;
@@ -282,8 +203,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
282 if (sk->sk_protocol == IPPROTO_UDPLITE) 203 if (sk->sk_protocol == IPPROTO_UDPLITE)
283 prot = &udplite_prot; 204 prot = &udplite_prot;
284 local_bh_disable(); 205 local_bh_disable();
285 sock_prot_inuse_add(sk->sk_prot, -1); 206 sock_prot_inuse_add(net, sk->sk_prot, -1);
286 sock_prot_inuse_add(prot, 1); 207 sock_prot_inuse_add(net, prot, 1);
287 local_bh_enable(); 208 local_bh_enable();
288 sk->sk_prot = prot; 209 sk->sk_prot = prot;
289 sk->sk_socket->ops = &inet_dgram_ops; 210 sk->sk_socket->ops = &inet_dgram_ops;
@@ -309,63 +230,86 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
309 goto e_inval; 230 goto e_inval;
310 231
311 case IPV6_V6ONLY: 232 case IPV6_V6ONLY:
312 if (inet_sk(sk)->num) 233 if (optlen < sizeof(int) ||
234 inet_sk(sk)->num)
313 goto e_inval; 235 goto e_inval;
314 np->ipv6only = valbool; 236 np->ipv6only = valbool;
315 retv = 0; 237 retv = 0;
316 break; 238 break;
317 239
318 case IPV6_RECVPKTINFO: 240 case IPV6_RECVPKTINFO:
241 if (optlen < sizeof(int))
242 goto e_inval;
319 np->rxopt.bits.rxinfo = valbool; 243 np->rxopt.bits.rxinfo = valbool;
320 retv = 0; 244 retv = 0;
321 break; 245 break;
322 246
323 case IPV6_2292PKTINFO: 247 case IPV6_2292PKTINFO:
248 if (optlen < sizeof(int))
249 goto e_inval;
324 np->rxopt.bits.rxoinfo = valbool; 250 np->rxopt.bits.rxoinfo = valbool;
325 retv = 0; 251 retv = 0;
326 break; 252 break;
327 253
328 case IPV6_RECVHOPLIMIT: 254 case IPV6_RECVHOPLIMIT:
255 if (optlen < sizeof(int))
256 goto e_inval;
329 np->rxopt.bits.rxhlim = valbool; 257 np->rxopt.bits.rxhlim = valbool;
330 retv = 0; 258 retv = 0;
331 break; 259 break;
332 260
333 case IPV6_2292HOPLIMIT: 261 case IPV6_2292HOPLIMIT:
262 if (optlen < sizeof(int))
263 goto e_inval;
334 np->rxopt.bits.rxohlim = valbool; 264 np->rxopt.bits.rxohlim = valbool;
335 retv = 0; 265 retv = 0;
336 break; 266 break;
337 267
338 case IPV6_RECVRTHDR: 268 case IPV6_RECVRTHDR:
269 if (optlen < sizeof(int))
270 goto e_inval;
339 np->rxopt.bits.srcrt = valbool; 271 np->rxopt.bits.srcrt = valbool;
340 retv = 0; 272 retv = 0;
341 break; 273 break;
342 274
343 case IPV6_2292RTHDR: 275 case IPV6_2292RTHDR:
276 if (optlen < sizeof(int))
277 goto e_inval;
344 np->rxopt.bits.osrcrt = valbool; 278 np->rxopt.bits.osrcrt = valbool;
345 retv = 0; 279 retv = 0;
346 break; 280 break;
347 281
348 case IPV6_RECVHOPOPTS: 282 case IPV6_RECVHOPOPTS:
283 if (optlen < sizeof(int))
284 goto e_inval;
349 np->rxopt.bits.hopopts = valbool; 285 np->rxopt.bits.hopopts = valbool;
350 retv = 0; 286 retv = 0;
351 break; 287 break;
352 288
353 case IPV6_2292HOPOPTS: 289 case IPV6_2292HOPOPTS:
290 if (optlen < sizeof(int))
291 goto e_inval;
354 np->rxopt.bits.ohopopts = valbool; 292 np->rxopt.bits.ohopopts = valbool;
355 retv = 0; 293 retv = 0;
356 break; 294 break;
357 295
358 case IPV6_RECVDSTOPTS: 296 case IPV6_RECVDSTOPTS:
297 if (optlen < sizeof(int))
298 goto e_inval;
359 np->rxopt.bits.dstopts = valbool; 299 np->rxopt.bits.dstopts = valbool;
360 retv = 0; 300 retv = 0;
361 break; 301 break;
362 302
363 case IPV6_2292DSTOPTS: 303 case IPV6_2292DSTOPTS:
304 if (optlen < sizeof(int))
305 goto e_inval;
364 np->rxopt.bits.odstopts = valbool; 306 np->rxopt.bits.odstopts = valbool;
365 retv = 0; 307 retv = 0;
366 break; 308 break;
367 309
368 case IPV6_TCLASS: 310 case IPV6_TCLASS:
311 if (optlen < sizeof(int))
312 goto e_inval;
369 if (val < -1 || val > 0xff) 313 if (val < -1 || val > 0xff)
370 goto e_inval; 314 goto e_inval;
371 np->tclass = val; 315 np->tclass = val;
@@ -373,11 +317,15 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
373 break; 317 break;
374 318
375 case IPV6_RECVTCLASS: 319 case IPV6_RECVTCLASS:
320 if (optlen < sizeof(int))
321 goto e_inval;
376 np->rxopt.bits.rxtclass = valbool; 322 np->rxopt.bits.rxtclass = valbool;
377 retv = 0; 323 retv = 0;
378 break; 324 break;
379 325
380 case IPV6_FLOWINFO: 326 case IPV6_FLOWINFO:
327 if (optlen < sizeof(int))
328 goto e_inval;
381 np->rxopt.bits.rxflow = valbool; 329 np->rxopt.bits.rxflow = valbool;
382 retv = 0; 330 retv = 0;
383 break; 331 break;
@@ -396,9 +344,9 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
396 if (optname != IPV6_RTHDR && !capable(CAP_NET_RAW)) 344 if (optname != IPV6_RTHDR && !capable(CAP_NET_RAW))
397 break; 345 break;
398 346
399 retv = -EINVAL; 347 if (optlen < sizeof(struct ipv6_opt_hdr) ||
400 if (optlen & 0x7 || optlen > 8 * 255) 348 optlen & 0x7 || optlen > 8 * 255)
401 break; 349 goto e_inval;
402 350
403 opt = ipv6_renew_options(sk, np->opt, optname, 351 opt = ipv6_renew_options(sk, np->opt, optname,
404 (struct ipv6_opt_hdr __user *)optval, 352 (struct ipv6_opt_hdr __user *)optval,
@@ -426,25 +374,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
426 } 374 }
427 375
428 retv = 0; 376 retv = 0;
429 if (inet_sk(sk)->is_icsk) { 377 opt = ipv6_update_options(sk, opt);
430 if (opt) {
431 struct inet_connection_sock *icsk = inet_csk(sk);
432 if (!((1 << sk->sk_state) &
433 (TCPF_LISTEN | TCPF_CLOSE))
434 && inet_sk(sk)->daddr != LOOPBACK4_IPV6) {
435 icsk->icsk_ext_hdr_len =
436 opt->opt_flen + opt->opt_nflen;
437 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
438 }
439 }
440 opt = xchg(&np->opt, opt);
441 sk_dst_reset(sk);
442 } else {
443 write_lock(&sk->sk_dst_lock);
444 opt = xchg(&np->opt, opt);
445 write_unlock(&sk->sk_dst_lock);
446 sk_dst_reset(sk);
447 }
448sticky_done: 378sticky_done:
449 if (opt) 379 if (opt)
450 sock_kfree_s(sk, opt, opt->tot_len); 380 sock_kfree_s(sk, opt, opt->tot_len);
@@ -490,32 +420,15 @@ sticky_done:
490 goto done; 420 goto done;
491update: 421update:
492 retv = 0; 422 retv = 0;
493 if (inet_sk(sk)->is_icsk) { 423 opt = ipv6_update_options(sk, opt);
494 if (opt) {
495 struct inet_connection_sock *icsk = inet_csk(sk);
496 if (!((1 << sk->sk_state) &
497 (TCPF_LISTEN | TCPF_CLOSE))
498 && inet_sk(sk)->daddr != LOOPBACK4_IPV6) {
499 icsk->icsk_ext_hdr_len =
500 opt->opt_flen + opt->opt_nflen;
501 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
502 }
503 }
504 opt = xchg(&np->opt, opt);
505 sk_dst_reset(sk);
506 } else {
507 write_lock(&sk->sk_dst_lock);
508 opt = xchg(&np->opt, opt);
509 write_unlock(&sk->sk_dst_lock);
510 sk_dst_reset(sk);
511 }
512
513done: 424done:
514 if (opt) 425 if (opt)
515 sock_kfree_s(sk, opt, opt->tot_len); 426 sock_kfree_s(sk, opt, opt->tot_len);
516 break; 427 break;
517 } 428 }
518 case IPV6_UNICAST_HOPS: 429 case IPV6_UNICAST_HOPS:
430 if (optlen < sizeof(int))
431 goto e_inval;
519 if (val > 255 || val < -1) 432 if (val > 255 || val < -1)
520 goto e_inval; 433 goto e_inval;
521 np->hop_limit = val; 434 np->hop_limit = val;
@@ -525,6 +438,8 @@ done:
525 case IPV6_MULTICAST_HOPS: 438 case IPV6_MULTICAST_HOPS:
526 if (sk->sk_type == SOCK_STREAM) 439 if (sk->sk_type == SOCK_STREAM)
527 goto e_inval; 440 goto e_inval;
441 if (optlen < sizeof(int))
442 goto e_inval;
528 if (val > 255 || val < -1) 443 if (val > 255 || val < -1)
529 goto e_inval; 444 goto e_inval;
530 np->mcast_hops = val; 445 np->mcast_hops = val;
@@ -532,6 +447,8 @@ done:
532 break; 447 break;
533 448
534 case IPV6_MULTICAST_LOOP: 449 case IPV6_MULTICAST_LOOP:
450 if (optlen < sizeof(int))
451 goto e_inval;
535 np->mc_loop = valbool; 452 np->mc_loop = valbool;
536 retv = 0; 453 retv = 0;
537 break; 454 break;
@@ -539,12 +456,14 @@ done:
539 case IPV6_MULTICAST_IF: 456 case IPV6_MULTICAST_IF:
540 if (sk->sk_type == SOCK_STREAM) 457 if (sk->sk_type == SOCK_STREAM)
541 goto e_inval; 458 goto e_inval;
459 if (optlen < sizeof(int))
460 goto e_inval;
542 461
543 if (val) { 462 if (val) {
544 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val) 463 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
545 goto e_inval; 464 goto e_inval;
546 465
547 if (__dev_get_by_index(&init_net, val) == NULL) { 466 if (__dev_get_by_index(net, val) == NULL) {
548 retv = -ENODEV; 467 retv = -ENODEV;
549 break; 468 break;
550 } 469 }
@@ -557,6 +476,9 @@ done:
557 { 476 {
558 struct ipv6_mreq mreq; 477 struct ipv6_mreq mreq;
559 478
479 if (optlen < sizeof(struct ipv6_mreq))
480 goto e_inval;
481
560 retv = -EPROTO; 482 retv = -EPROTO;
561 if (inet_sk(sk)->is_icsk) 483 if (inet_sk(sk)->is_icsk)
562 break; 484 break;
@@ -576,7 +498,7 @@ done:
576 { 498 {
577 struct ipv6_mreq mreq; 499 struct ipv6_mreq mreq;
578 500
579 if (optlen != sizeof(struct ipv6_mreq)) 501 if (optlen < sizeof(struct ipv6_mreq))
580 goto e_inval; 502 goto e_inval;
581 503
582 retv = -EFAULT; 504 retv = -EFAULT;
@@ -595,6 +517,9 @@ done:
595 struct group_req greq; 517 struct group_req greq;
596 struct sockaddr_in6 *psin6; 518 struct sockaddr_in6 *psin6;
597 519
520 if (optlen < sizeof(struct group_req))
521 goto e_inval;
522
598 retv = -EFAULT; 523 retv = -EFAULT;
599 if (copy_from_user(&greq, optval, sizeof(struct group_req))) 524 if (copy_from_user(&greq, optval, sizeof(struct group_req)))
600 break; 525 break;
@@ -619,7 +544,7 @@ done:
619 struct group_source_req greqs; 544 struct group_source_req greqs;
620 int omode, add; 545 int omode, add;
621 546
622 if (optlen != sizeof(struct group_source_req)) 547 if (optlen < sizeof(struct group_source_req))
623 goto e_inval; 548 goto e_inval;
624 if (copy_from_user(&greqs, optval, sizeof(greqs))) { 549 if (copy_from_user(&greqs, optval, sizeof(greqs))) {
625 retv = -EFAULT; 550 retv = -EFAULT;
@@ -693,27 +618,37 @@ done:
693 break; 618 break;
694 } 619 }
695 case IPV6_ROUTER_ALERT: 620 case IPV6_ROUTER_ALERT:
621 if (optlen < sizeof(int))
622 goto e_inval;
696 retv = ip6_ra_control(sk, val, NULL); 623 retv = ip6_ra_control(sk, val, NULL);
697 break; 624 break;
698 case IPV6_MTU_DISCOVER: 625 case IPV6_MTU_DISCOVER:
626 if (optlen < sizeof(int))
627 goto e_inval;
699 if (val<0 || val>3) 628 if (val<0 || val>3)
700 goto e_inval; 629 goto e_inval;
701 np->pmtudisc = val; 630 np->pmtudisc = val;
702 retv = 0; 631 retv = 0;
703 break; 632 break;
704 case IPV6_MTU: 633 case IPV6_MTU:
634 if (optlen < sizeof(int))
635 goto e_inval;
705 if (val && val < IPV6_MIN_MTU) 636 if (val && val < IPV6_MIN_MTU)
706 goto e_inval; 637 goto e_inval;
707 np->frag_size = val; 638 np->frag_size = val;
708 retv = 0; 639 retv = 0;
709 break; 640 break;
710 case IPV6_RECVERR: 641 case IPV6_RECVERR:
642 if (optlen < sizeof(int))
643 goto e_inval;
711 np->recverr = valbool; 644 np->recverr = valbool;
712 if (!val) 645 if (!val)
713 skb_queue_purge(&sk->sk_error_queue); 646 skb_queue_purge(&sk->sk_error_queue);
714 retv = 0; 647 retv = 0;
715 break; 648 break;
716 case IPV6_FLOWINFO_SEND: 649 case IPV6_FLOWINFO_SEND:
650 if (optlen < sizeof(int))
651 goto e_inval;
717 np->sndflow = valbool; 652 np->sndflow = valbool;
718 retv = 0; 653 retv = 0;
719 break; 654 break;
@@ -728,7 +663,70 @@ done:
728 retv = xfrm_user_policy(sk, optname, optval, optlen); 663 retv = xfrm_user_policy(sk, optname, optval, optlen);
729 break; 664 break;
730 665
666 case IPV6_ADDR_PREFERENCES:
667 {
668 unsigned int pref = 0;
669 unsigned int prefmask = ~0;
670
671 if (optlen < sizeof(int))
672 goto e_inval;
673
674 retv = -EINVAL;
675
676 /* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */
677 switch (val & (IPV6_PREFER_SRC_PUBLIC|
678 IPV6_PREFER_SRC_TMP|
679 IPV6_PREFER_SRC_PUBTMP_DEFAULT)) {
680 case IPV6_PREFER_SRC_PUBLIC:
681 pref |= IPV6_PREFER_SRC_PUBLIC;
682 break;
683 case IPV6_PREFER_SRC_TMP:
684 pref |= IPV6_PREFER_SRC_TMP;
685 break;
686 case IPV6_PREFER_SRC_PUBTMP_DEFAULT:
687 break;
688 case 0:
689 goto pref_skip_pubtmp;
690 default:
691 goto e_inval;
692 }
693
694 prefmask &= ~(IPV6_PREFER_SRC_PUBLIC|
695 IPV6_PREFER_SRC_TMP);
696pref_skip_pubtmp:
697
698 /* check HOME/COA conflicts */
699 switch (val & (IPV6_PREFER_SRC_HOME|IPV6_PREFER_SRC_COA)) {
700 case IPV6_PREFER_SRC_HOME:
701 break;
702 case IPV6_PREFER_SRC_COA:
703 pref |= IPV6_PREFER_SRC_COA;
704 case 0:
705 goto pref_skip_coa;
706 default:
707 goto e_inval;
708 }
709
710 prefmask &= ~IPV6_PREFER_SRC_COA;
711pref_skip_coa:
712
713 /* check CGA/NONCGA conflicts */
714 switch (val & (IPV6_PREFER_SRC_CGA|IPV6_PREFER_SRC_NONCGA)) {
715 case IPV6_PREFER_SRC_CGA:
716 case IPV6_PREFER_SRC_NONCGA:
717 case 0:
718 break;
719 default:
720 goto e_inval;
721 }
722
723 np->srcprefs = (np->srcprefs & prefmask) | pref;
724 retv = 0;
725
726 break;
727 }
731 } 728 }
729
732 release_sock(sk); 730 release_sock(sk);
733 731
734 return retv; 732 return retv;
@@ -839,6 +837,9 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
839 int len; 837 int len;
840 int val; 838 int val;
841 839
840 if (ip6_mroute_opt(optname))
841 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
842
842 if (get_user(len, optlen)) 843 if (get_user(len, optlen))
843 return -EFAULT; 844 return -EFAULT;
844 switch (optname) { 845 switch (optname) {
@@ -1015,9 +1016,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
1015 dst = sk_dst_get(sk); 1016 dst = sk_dst_get(sk);
1016 if (dst) { 1017 if (dst) {
1017 if (val < 0) 1018 if (val < 0)
1018 val = dst_metric(dst, RTAX_HOPLIMIT); 1019 val = ip6_dst_hoplimit(dst);
1019 if (val < 0)
1020 val = ipv6_get_hoplimit(dst->dev);
1021 dst_release(dst); 1020 dst_release(dst);
1022 } 1021 }
1023 if (val < 0) 1022 if (val < 0)
@@ -1045,6 +1044,24 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
1045 val = np->sndflow; 1044 val = np->sndflow;
1046 break; 1045 break;
1047 1046
1047 case IPV6_ADDR_PREFERENCES:
1048 val = 0;
1049
1050 if (np->srcprefs & IPV6_PREFER_SRC_TMP)
1051 val |= IPV6_PREFER_SRC_TMP;
1052 else if (np->srcprefs & IPV6_PREFER_SRC_PUBLIC)
1053 val |= IPV6_PREFER_SRC_PUBLIC;
1054 else {
1055 /* XXX: should we return system default? */
1056 val |= IPV6_PREFER_SRC_PUBTMP_DEFAULT;
1057 }
1058
1059 if (np->srcprefs & IPV6_PREFER_SRC_COA)
1060 val |= IPV6_PREFER_SRC_COA;
1061 else
1062 val |= IPV6_PREFER_SRC_HOME;
1063 break;
1064
1048 default: 1065 default:
1049 return -ENOPROTOOPT; 1066 return -ENOPROTOOPT;
1050 } 1067 }
@@ -1128,13 +1145,3 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
1128EXPORT_SYMBOL(compat_ipv6_getsockopt); 1145EXPORT_SYMBOL(compat_ipv6_getsockopt);
1129#endif 1146#endif
1130 1147
1131int __init ipv6_packet_init(void)
1132{
1133 dev_add_pack(&ipv6_packet_type);
1134 return 0;
1135}
1136
1137void ipv6_packet_cleanup(void)
1138{
1139 dev_remove_pack(&ipv6_packet_type);
1140}
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index ab228d1ea114..54f91efdae58 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -59,6 +59,7 @@
59#include <net/ndisc.h> 59#include <net/ndisc.h>
60#include <net/addrconf.h> 60#include <net/addrconf.h>
61#include <net/ip6_route.h> 61#include <net/ip6_route.h>
62#include <net/inet_common.h>
62 63
63#include <net/ip6_checksum.h> 64#include <net/ip6_checksum.h>
64 65
@@ -126,10 +127,6 @@ static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
126/* Big mc list lock for all the sockets */ 127/* Big mc list lock for all the sockets */
127static DEFINE_RWLOCK(ipv6_sk_mc_lock); 128static DEFINE_RWLOCK(ipv6_sk_mc_lock);
128 129
129static struct socket *igmp6_socket;
130
131int __ipv6_dev_mc_dec(struct inet6_dev *idev, struct in6_addr *addr);
132
133static void igmp6_join_group(struct ifmcaddr6 *ma); 130static void igmp6_join_group(struct ifmcaddr6 *ma);
134static void igmp6_leave_group(struct ifmcaddr6 *ma); 131static void igmp6_leave_group(struct ifmcaddr6 *ma);
135static void igmp6_timer_handler(unsigned long data); 132static void igmp6_timer_handler(unsigned long data);
@@ -178,11 +175,12 @@ int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
178 * socket join on multicast group 175 * socket join on multicast group
179 */ 176 */
180 177
181int ipv6_sock_mc_join(struct sock *sk, int ifindex, struct in6_addr *addr) 178int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
182{ 179{
183 struct net_device *dev = NULL; 180 struct net_device *dev = NULL;
184 struct ipv6_mc_socklist *mc_lst; 181 struct ipv6_mc_socklist *mc_lst;
185 struct ipv6_pinfo *np = inet6_sk(sk); 182 struct ipv6_pinfo *np = inet6_sk(sk);
183 struct net *net = sock_net(sk);
186 int err; 184 int err;
187 185
188 if (!ipv6_addr_is_multicast(addr)) 186 if (!ipv6_addr_is_multicast(addr))
@@ -208,14 +206,14 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, struct in6_addr *addr)
208 206
209 if (ifindex == 0) { 207 if (ifindex == 0) {
210 struct rt6_info *rt; 208 struct rt6_info *rt;
211 rt = rt6_lookup(addr, NULL, 0, 0); 209 rt = rt6_lookup(net, addr, NULL, 0, 0);
212 if (rt) { 210 if (rt) {
213 dev = rt->rt6i_dev; 211 dev = rt->rt6i_dev;
214 dev_hold(dev); 212 dev_hold(dev);
215 dst_release(&rt->u.dst); 213 dst_release(&rt->u.dst);
216 } 214 }
217 } else 215 } else
218 dev = dev_get_by_index(&init_net, ifindex); 216 dev = dev_get_by_index(net, ifindex);
219 217
220 if (dev == NULL) { 218 if (dev == NULL) {
221 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); 219 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
@@ -252,10 +250,11 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, struct in6_addr *addr)
252/* 250/*
253 * socket leave on multicast group 251 * socket leave on multicast group
254 */ 252 */
255int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr) 253int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
256{ 254{
257 struct ipv6_pinfo *np = inet6_sk(sk); 255 struct ipv6_pinfo *np = inet6_sk(sk);
258 struct ipv6_mc_socklist *mc_lst, **lnk; 256 struct ipv6_mc_socklist *mc_lst, **lnk;
257 struct net *net = sock_net(sk);
259 258
260 write_lock_bh(&ipv6_sk_mc_lock); 259 write_lock_bh(&ipv6_sk_mc_lock);
261 for (lnk = &np->ipv6_mc_list; (mc_lst = *lnk) !=NULL ; lnk = &mc_lst->next) { 260 for (lnk = &np->ipv6_mc_list; (mc_lst = *lnk) !=NULL ; lnk = &mc_lst->next) {
@@ -266,7 +265,8 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
266 *lnk = mc_lst->next; 265 *lnk = mc_lst->next;
267 write_unlock_bh(&ipv6_sk_mc_lock); 266 write_unlock_bh(&ipv6_sk_mc_lock);
268 267
269 if ((dev = dev_get_by_index(&init_net, mc_lst->ifindex)) != NULL) { 268 dev = dev_get_by_index(net, mc_lst->ifindex);
269 if (dev != NULL) {
270 struct inet6_dev *idev = in6_dev_get(dev); 270 struct inet6_dev *idev = in6_dev_get(dev);
271 271
272 (void) ip6_mc_leave_src(sk, mc_lst, idev); 272 (void) ip6_mc_leave_src(sk, mc_lst, idev);
@@ -286,7 +286,9 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
286 return -EADDRNOTAVAIL; 286 return -EADDRNOTAVAIL;
287} 287}
288 288
289static struct inet6_dev *ip6_mc_find_dev(struct in6_addr *group, int ifindex) 289static struct inet6_dev *ip6_mc_find_dev(struct net *net,
290 struct in6_addr *group,
291 int ifindex)
290{ 292{
291 struct net_device *dev = NULL; 293 struct net_device *dev = NULL;
292 struct inet6_dev *idev = NULL; 294 struct inet6_dev *idev = NULL;
@@ -294,14 +296,14 @@ static struct inet6_dev *ip6_mc_find_dev(struct in6_addr *group, int ifindex)
294 if (ifindex == 0) { 296 if (ifindex == 0) {
295 struct rt6_info *rt; 297 struct rt6_info *rt;
296 298
297 rt = rt6_lookup(group, NULL, 0, 0); 299 rt = rt6_lookup(net, group, NULL, 0, 0);
298 if (rt) { 300 if (rt) {
299 dev = rt->rt6i_dev; 301 dev = rt->rt6i_dev;
300 dev_hold(dev); 302 dev_hold(dev);
301 dst_release(&rt->u.dst); 303 dst_release(&rt->u.dst);
302 } 304 }
303 } else 305 } else
304 dev = dev_get_by_index(&init_net, ifindex); 306 dev = dev_get_by_index(net, ifindex);
305 307
306 if (!dev) 308 if (!dev)
307 return NULL; 309 return NULL;
@@ -324,6 +326,7 @@ void ipv6_sock_mc_close(struct sock *sk)
324{ 326{
325 struct ipv6_pinfo *np = inet6_sk(sk); 327 struct ipv6_pinfo *np = inet6_sk(sk);
326 struct ipv6_mc_socklist *mc_lst; 328 struct ipv6_mc_socklist *mc_lst;
329 struct net *net = sock_net(sk);
327 330
328 write_lock_bh(&ipv6_sk_mc_lock); 331 write_lock_bh(&ipv6_sk_mc_lock);
329 while ((mc_lst = np->ipv6_mc_list) != NULL) { 332 while ((mc_lst = np->ipv6_mc_list) != NULL) {
@@ -332,7 +335,7 @@ void ipv6_sock_mc_close(struct sock *sk)
332 np->ipv6_mc_list = mc_lst->next; 335 np->ipv6_mc_list = mc_lst->next;
333 write_unlock_bh(&ipv6_sk_mc_lock); 336 write_unlock_bh(&ipv6_sk_mc_lock);
334 337
335 dev = dev_get_by_index(&init_net, mc_lst->ifindex); 338 dev = dev_get_by_index(net, mc_lst->ifindex);
336 if (dev) { 339 if (dev) {
337 struct inet6_dev *idev = in6_dev_get(dev); 340 struct inet6_dev *idev = in6_dev_get(dev);
338 341
@@ -361,6 +364,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
361 struct inet6_dev *idev; 364 struct inet6_dev *idev;
362 struct ipv6_pinfo *inet6 = inet6_sk(sk); 365 struct ipv6_pinfo *inet6 = inet6_sk(sk);
363 struct ip6_sf_socklist *psl; 366 struct ip6_sf_socklist *psl;
367 struct net *net = sock_net(sk);
364 int i, j, rv; 368 int i, j, rv;
365 int leavegroup = 0; 369 int leavegroup = 0;
366 int pmclocked = 0; 370 int pmclocked = 0;
@@ -376,7 +380,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
376 if (!ipv6_addr_is_multicast(group)) 380 if (!ipv6_addr_is_multicast(group))
377 return -EINVAL; 381 return -EINVAL;
378 382
379 idev = ip6_mc_find_dev(group, pgsr->gsr_interface); 383 idev = ip6_mc_find_dev(net, group, pgsr->gsr_interface);
380 if (!idev) 384 if (!idev)
381 return -ENODEV; 385 return -ENODEV;
382 dev = idev->dev; 386 dev = idev->dev;
@@ -500,6 +504,7 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
500 struct inet6_dev *idev; 504 struct inet6_dev *idev;
501 struct ipv6_pinfo *inet6 = inet6_sk(sk); 505 struct ipv6_pinfo *inet6 = inet6_sk(sk);
502 struct ip6_sf_socklist *newpsl, *psl; 506 struct ip6_sf_socklist *newpsl, *psl;
507 struct net *net = sock_net(sk);
503 int leavegroup = 0; 508 int leavegroup = 0;
504 int i, err; 509 int i, err;
505 510
@@ -511,7 +516,7 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
511 gsf->gf_fmode != MCAST_EXCLUDE) 516 gsf->gf_fmode != MCAST_EXCLUDE)
512 return -EINVAL; 517 return -EINVAL;
513 518
514 idev = ip6_mc_find_dev(group, gsf->gf_interface); 519 idev = ip6_mc_find_dev(net, group, gsf->gf_interface);
515 520
516 if (!idev) 521 if (!idev)
517 return -ENODEV; 522 return -ENODEV;
@@ -592,13 +597,14 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
592 struct net_device *dev; 597 struct net_device *dev;
593 struct ipv6_pinfo *inet6 = inet6_sk(sk); 598 struct ipv6_pinfo *inet6 = inet6_sk(sk);
594 struct ip6_sf_socklist *psl; 599 struct ip6_sf_socklist *psl;
600 struct net *net = sock_net(sk);
595 601
596 group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr; 602 group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
597 603
598 if (!ipv6_addr_is_multicast(group)) 604 if (!ipv6_addr_is_multicast(group))
599 return -EINVAL; 605 return -EINVAL;
600 606
601 idev = ip6_mc_find_dev(group, gsf->gf_interface); 607 idev = ip6_mc_find_dev(net, group, gsf->gf_interface);
602 608
603 if (!idev) 609 if (!idev)
604 return -ENODEV; 610 return -ENODEV;
@@ -656,8 +662,8 @@ done:
656 return err; 662 return err;
657} 663}
658 664
659int inet6_mc_check(struct sock *sk, struct in6_addr *mc_addr, 665int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
660 struct in6_addr *src_addr) 666 const struct in6_addr *src_addr)
661{ 667{
662 struct ipv6_pinfo *np = inet6_sk(sk); 668 struct ipv6_pinfo *np = inet6_sk(sk);
663 struct ipv6_mc_socklist *mc; 669 struct ipv6_mc_socklist *mc;
@@ -863,7 +869,7 @@ static void mld_clear_delrec(struct inet6_dev *idev)
863/* 869/*
864 * device multicast group inc (add if not found) 870 * device multicast group inc (add if not found)
865 */ 871 */
866int ipv6_dev_mc_inc(struct net_device *dev, struct in6_addr *addr) 872int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
867{ 873{
868 struct ifmcaddr6 *mc; 874 struct ifmcaddr6 *mc;
869 struct inet6_dev *idev; 875 struct inet6_dev *idev;
@@ -934,7 +940,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, struct in6_addr *addr)
934/* 940/*
935 * device multicast group del 941 * device multicast group del
936 */ 942 */
937int __ipv6_dev_mc_dec(struct inet6_dev *idev, struct in6_addr *addr) 943int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
938{ 944{
939 struct ifmcaddr6 *ma, **map; 945 struct ifmcaddr6 *ma, **map;
940 946
@@ -959,7 +965,7 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, struct in6_addr *addr)
959 return -ENOENT; 965 return -ENOENT;
960} 966}
961 967
962int ipv6_dev_mc_dec(struct net_device *dev, struct in6_addr *addr) 968int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
963{ 969{
964 struct inet6_dev *idev = in6_dev_get(dev); 970 struct inet6_dev *idev = in6_dev_get(dev);
965 int err; 971 int err;
@@ -1004,8 +1010,8 @@ int ipv6_is_mld(struct sk_buff *skb, int nexthdr)
1004/* 1010/*
1005 * check if the interface/address pair is valid 1011 * check if the interface/address pair is valid
1006 */ 1012 */
1007int ipv6_chk_mcast_addr(struct net_device *dev, struct in6_addr *group, 1013int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
1008 struct in6_addr *src_addr) 1014 const struct in6_addr *src_addr)
1009{ 1015{
1010 struct inet6_dev *idev; 1016 struct inet6_dev *idev;
1011 struct ifmcaddr6 *mc; 1017 struct ifmcaddr6 *mc;
@@ -1393,10 +1399,12 @@ mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
1393 1399
1394static struct sk_buff *mld_newpack(struct net_device *dev, int size) 1400static struct sk_buff *mld_newpack(struct net_device *dev, int size)
1395{ 1401{
1396 struct sock *sk = igmp6_socket->sk; 1402 struct net *net = dev_net(dev);
1403 struct sock *sk = net->ipv6.igmp_sk;
1397 struct sk_buff *skb; 1404 struct sk_buff *skb;
1398 struct mld2_report *pmr; 1405 struct mld2_report *pmr;
1399 struct in6_addr addr_buf; 1406 struct in6_addr addr_buf;
1407 const struct in6_addr *saddr;
1400 int err; 1408 int err;
1401 u8 ra[8] = { IPPROTO_ICMPV6, 0, 1409 u8 ra[8] = { IPPROTO_ICMPV6, 0,
1402 IPV6_TLV_ROUTERALERT, 2, 0, 0, 1410 IPV6_TLV_ROUTERALERT, 2, 0, 0,
@@ -1415,10 +1423,11 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
1415 * use unspecified address as the source address 1423 * use unspecified address as the source address
1416 * when a valid link-local address is not available. 1424 * when a valid link-local address is not available.
1417 */ 1425 */
1418 memset(&addr_buf, 0, sizeof(addr_buf)); 1426 saddr = &in6addr_any;
1419 } 1427 } else
1428 saddr = &addr_buf;
1420 1429
1421 ip6_nd_hdr(sk, skb, dev, &addr_buf, &mld2_all_mcr, NEXTHDR_HOP, 0); 1430 ip6_nd_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
1422 1431
1423 memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra)); 1432 memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
1424 1433
@@ -1433,25 +1442,6 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
1433 return skb; 1442 return skb;
1434} 1443}
1435 1444
1436static inline int mld_dev_queue_xmit2(struct sk_buff *skb)
1437{
1438 struct net_device *dev = skb->dev;
1439 unsigned char ha[MAX_ADDR_LEN];
1440
1441 ndisc_mc_map(&ipv6_hdr(skb)->daddr, ha, dev, 1);
1442 if (dev_hard_header(skb, dev, ETH_P_IPV6, ha, NULL, skb->len) < 0) {
1443 kfree_skb(skb);
1444 return -EINVAL;
1445 }
1446 return dev_queue_xmit(skb);
1447}
1448
1449static inline int mld_dev_queue_xmit(struct sk_buff *skb)
1450{
1451 return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb->dev,
1452 mld_dev_queue_xmit2);
1453}
1454
1455static void mld_sendpack(struct sk_buff *skb) 1445static void mld_sendpack(struct sk_buff *skb)
1456{ 1446{
1457 struct ipv6hdr *pip6 = ipv6_hdr(skb); 1447 struct ipv6hdr *pip6 = ipv6_hdr(skb);
@@ -1459,7 +1449,9 @@ static void mld_sendpack(struct sk_buff *skb)
1459 (struct mld2_report *)skb_transport_header(skb); 1449 (struct mld2_report *)skb_transport_header(skb);
1460 int payload_len, mldlen; 1450 int payload_len, mldlen;
1461 struct inet6_dev *idev = in6_dev_get(skb->dev); 1451 struct inet6_dev *idev = in6_dev_get(skb->dev);
1452 struct net *net = dev_net(skb->dev);
1462 int err; 1453 int err;
1454 struct flowi fl;
1463 1455
1464 IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS); 1456 IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS);
1465 payload_len = (skb->tail - skb->network_header) - sizeof(*pip6); 1457 payload_len = (skb->tail - skb->network_header) - sizeof(*pip6);
@@ -1469,8 +1461,25 @@ static void mld_sendpack(struct sk_buff *skb)
1469 pmr->csum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen, 1461 pmr->csum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
1470 IPPROTO_ICMPV6, csum_partial(skb_transport_header(skb), 1462 IPPROTO_ICMPV6, csum_partial(skb_transport_header(skb),
1471 mldlen, 0)); 1463 mldlen, 0));
1464
1465 skb->dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
1466
1467 if (!skb->dst) {
1468 err = -ENOMEM;
1469 goto err_out;
1470 }
1471
1472 icmpv6_flow_init(net->ipv6.igmp_sk, &fl, ICMPV6_MLD2_REPORT,
1473 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1474 skb->dev->ifindex);
1475
1476 err = xfrm_lookup(&skb->dst, &fl, NULL, 0);
1477 if (err)
1478 goto err_out;
1479
1472 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, 1480 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
1473 mld_dev_queue_xmit); 1481 dst_output);
1482out:
1474 if (!err) { 1483 if (!err) {
1475 ICMP6MSGOUT_INC_STATS_BH(idev, ICMPV6_MLD2_REPORT); 1484 ICMP6MSGOUT_INC_STATS_BH(idev, ICMPV6_MLD2_REPORT);
1476 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS); 1485 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
@@ -1480,6 +1489,11 @@ static void mld_sendpack(struct sk_buff *skb)
1480 1489
1481 if (likely(idev != NULL)) 1490 if (likely(idev != NULL))
1482 in6_dev_put(idev); 1491 in6_dev_put(idev);
1492 return;
1493
1494err_out:
1495 kfree_skb(skb);
1496 goto out;
1483} 1497}
1484 1498
1485static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel) 1499static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
@@ -1749,28 +1763,28 @@ static void mld_send_cr(struct inet6_dev *idev)
1749 1763
1750static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) 1764static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1751{ 1765{
1752 struct sock *sk = igmp6_socket->sk; 1766 struct net *net = dev_net(dev);
1767 struct sock *sk = net->ipv6.igmp_sk;
1753 struct inet6_dev *idev; 1768 struct inet6_dev *idev;
1754 struct sk_buff *skb; 1769 struct sk_buff *skb;
1755 struct icmp6hdr *hdr; 1770 struct icmp6hdr *hdr;
1756 struct in6_addr *snd_addr; 1771 const struct in6_addr *snd_addr, *saddr;
1757 struct in6_addr *addrp; 1772 struct in6_addr *addrp;
1758 struct in6_addr addr_buf; 1773 struct in6_addr addr_buf;
1759 struct in6_addr all_routers;
1760 int err, len, payload_len, full_len; 1774 int err, len, payload_len, full_len;
1761 u8 ra[8] = { IPPROTO_ICMPV6, 0, 1775 u8 ra[8] = { IPPROTO_ICMPV6, 0,
1762 IPV6_TLV_ROUTERALERT, 2, 0, 0, 1776 IPV6_TLV_ROUTERALERT, 2, 0, 0,
1763 IPV6_TLV_PADN, 0 }; 1777 IPV6_TLV_PADN, 0 };
1778 struct flowi fl;
1764 1779
1765 rcu_read_lock(); 1780 rcu_read_lock();
1766 IP6_INC_STATS(__in6_dev_get(dev), 1781 IP6_INC_STATS(__in6_dev_get(dev),
1767 IPSTATS_MIB_OUTREQUESTS); 1782 IPSTATS_MIB_OUTREQUESTS);
1768 rcu_read_unlock(); 1783 rcu_read_unlock();
1769 snd_addr = addr; 1784 if (type == ICMPV6_MGM_REDUCTION)
1770 if (type == ICMPV6_MGM_REDUCTION) { 1785 snd_addr = &in6addr_linklocal_allrouters;
1771 snd_addr = &all_routers; 1786 else
1772 ipv6_addr_all_routers(&all_routers); 1787 snd_addr = addr;
1773 }
1774 1788
1775 len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr); 1789 len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
1776 payload_len = len + sizeof(ra); 1790 payload_len = len + sizeof(ra);
@@ -1793,10 +1807,11 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1793 * use unspecified address as the source address 1807 * use unspecified address as the source address
1794 * when a valid link-local address is not available. 1808 * when a valid link-local address is not available.
1795 */ 1809 */
1796 memset(&addr_buf, 0, sizeof(addr_buf)); 1810 saddr = &in6addr_any;
1797 } 1811 } else
1812 saddr = &addr_buf;
1798 1813
1799 ip6_nd_hdr(sk, skb, dev, &addr_buf, snd_addr, NEXTHDR_HOP, payload_len); 1814 ip6_nd_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);
1800 1815
1801 memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra)); 1816 memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
1802 1817
@@ -1807,14 +1822,29 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1807 addrp = (struct in6_addr *) skb_put(skb, sizeof(struct in6_addr)); 1822 addrp = (struct in6_addr *) skb_put(skb, sizeof(struct in6_addr));
1808 ipv6_addr_copy(addrp, addr); 1823 ipv6_addr_copy(addrp, addr);
1809 1824
1810 hdr->icmp6_cksum = csum_ipv6_magic(&addr_buf, snd_addr, len, 1825 hdr->icmp6_cksum = csum_ipv6_magic(saddr, snd_addr, len,
1811 IPPROTO_ICMPV6, 1826 IPPROTO_ICMPV6,
1812 csum_partial((__u8 *) hdr, len, 0)); 1827 csum_partial((__u8 *) hdr, len, 0));
1813 1828
1814 idev = in6_dev_get(skb->dev); 1829 idev = in6_dev_get(skb->dev);
1815 1830
1831 skb->dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
1832 if (!skb->dst) {
1833 err = -ENOMEM;
1834 goto err_out;
1835 }
1836
1837 icmpv6_flow_init(sk, &fl, type,
1838 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1839 skb->dev->ifindex);
1840
1841 err = xfrm_lookup(&skb->dst, &fl, NULL, 0);
1842 if (err)
1843 goto err_out;
1844
1816 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, 1845 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
1817 mld_dev_queue_xmit); 1846 dst_output);
1847out:
1818 if (!err) { 1848 if (!err) {
1819 ICMP6MSGOUT_INC_STATS(idev, type); 1849 ICMP6MSGOUT_INC_STATS(idev, type);
1820 ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS); 1850 ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
@@ -1825,6 +1855,10 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1825 if (likely(idev != NULL)) 1855 if (likely(idev != NULL))
1826 in6_dev_put(idev); 1856 in6_dev_put(idev);
1827 return; 1857 return;
1858
1859err_out:
1860 kfree_skb(skb);
1861 goto out;
1828} 1862}
1829 1863
1830static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode, 1864static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
@@ -2276,24 +2310,19 @@ void ipv6_mc_init_dev(struct inet6_dev *idev)
2276void ipv6_mc_destroy_dev(struct inet6_dev *idev) 2310void ipv6_mc_destroy_dev(struct inet6_dev *idev)
2277{ 2311{
2278 struct ifmcaddr6 *i; 2312 struct ifmcaddr6 *i;
2279 struct in6_addr maddr;
2280 2313
2281 /* Deactivate timers */ 2314 /* Deactivate timers */
2282 ipv6_mc_down(idev); 2315 ipv6_mc_down(idev);
2283 2316
2284 /* Delete all-nodes address. */ 2317 /* Delete all-nodes address. */
2285 ipv6_addr_all_nodes(&maddr);
2286
2287 /* We cannot call ipv6_dev_mc_dec() directly, our caller in 2318 /* We cannot call ipv6_dev_mc_dec() directly, our caller in
2288 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will 2319 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
2289 * fail. 2320 * fail.
2290 */ 2321 */
2291 __ipv6_dev_mc_dec(idev, &maddr); 2322 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);
2292 2323
2293 if (idev->cnf.forwarding) { 2324 if (idev->cnf.forwarding)
2294 ipv6_addr_all_routers(&maddr); 2325 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);
2295 __ipv6_dev_mc_dec(idev, &maddr);
2296 }
2297 2326
2298 write_lock_bh(&idev->lock); 2327 write_lock_bh(&idev->lock);
2299 while ((i = idev->mc_list) != NULL) { 2328 while ((i = idev->mc_list) != NULL) {
@@ -2310,6 +2339,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
2310 2339
2311#ifdef CONFIG_PROC_FS 2340#ifdef CONFIG_PROC_FS
2312struct igmp6_mc_iter_state { 2341struct igmp6_mc_iter_state {
2342 struct seq_net_private p;
2313 struct net_device *dev; 2343 struct net_device *dev;
2314 struct inet6_dev *idev; 2344 struct inet6_dev *idev;
2315}; 2345};
@@ -2320,9 +2350,10 @@ static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
2320{ 2350{
2321 struct ifmcaddr6 *im = NULL; 2351 struct ifmcaddr6 *im = NULL;
2322 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); 2352 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2353 struct net *net = seq_file_net(seq);
2323 2354
2324 state->idev = NULL; 2355 state->idev = NULL;
2325 for_each_netdev(&init_net, state->dev) { 2356 for_each_netdev(net, state->dev) {
2326 struct inet6_dev *idev; 2357 struct inet6_dev *idev;
2327 idev = in6_dev_get(state->dev); 2358 idev = in6_dev_get(state->dev);
2328 if (!idev) 2359 if (!idev)
@@ -2424,8 +2455,8 @@ static const struct seq_operations igmp6_mc_seq_ops = {
2424 2455
2425static int igmp6_mc_seq_open(struct inode *inode, struct file *file) 2456static int igmp6_mc_seq_open(struct inode *inode, struct file *file)
2426{ 2457{
2427 return seq_open_private(file, &igmp6_mc_seq_ops, 2458 return seq_open_net(inode, file, &igmp6_mc_seq_ops,
2428 sizeof(struct igmp6_mc_iter_state)); 2459 sizeof(struct igmp6_mc_iter_state));
2429} 2460}
2430 2461
2431static const struct file_operations igmp6_mc_seq_fops = { 2462static const struct file_operations igmp6_mc_seq_fops = {
@@ -2433,10 +2464,11 @@ static const struct file_operations igmp6_mc_seq_fops = {
2433 .open = igmp6_mc_seq_open, 2464 .open = igmp6_mc_seq_open,
2434 .read = seq_read, 2465 .read = seq_read,
2435 .llseek = seq_lseek, 2466 .llseek = seq_lseek,
2436 .release = seq_release_private, 2467 .release = seq_release_net,
2437}; 2468};
2438 2469
2439struct igmp6_mcf_iter_state { 2470struct igmp6_mcf_iter_state {
2471 struct seq_net_private p;
2440 struct net_device *dev; 2472 struct net_device *dev;
2441 struct inet6_dev *idev; 2473 struct inet6_dev *idev;
2442 struct ifmcaddr6 *im; 2474 struct ifmcaddr6 *im;
@@ -2449,10 +2481,11 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
2449 struct ip6_sf_list *psf = NULL; 2481 struct ip6_sf_list *psf = NULL;
2450 struct ifmcaddr6 *im = NULL; 2482 struct ifmcaddr6 *im = NULL;
2451 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); 2483 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2484 struct net *net = seq_file_net(seq);
2452 2485
2453 state->idev = NULL; 2486 state->idev = NULL;
2454 state->im = NULL; 2487 state->im = NULL;
2455 for_each_netdev(&init_net, state->dev) { 2488 for_each_netdev(net, state->dev) {
2456 struct inet6_dev *idev; 2489 struct inet6_dev *idev;
2457 idev = in6_dev_get(state->dev); 2490 idev = in6_dev_get(state->dev);
2458 if (unlikely(idev == NULL)) 2491 if (unlikely(idev == NULL))
@@ -2584,8 +2617,8 @@ static const struct seq_operations igmp6_mcf_seq_ops = {
2584 2617
2585static int igmp6_mcf_seq_open(struct inode *inode, struct file *file) 2618static int igmp6_mcf_seq_open(struct inode *inode, struct file *file)
2586{ 2619{
2587 return seq_open_private(file, &igmp6_mcf_seq_ops, 2620 return seq_open_net(inode, file, &igmp6_mcf_seq_ops,
2588 sizeof(struct igmp6_mcf_iter_state)); 2621 sizeof(struct igmp6_mcf_iter_state));
2589} 2622}
2590 2623
2591static const struct file_operations igmp6_mcf_seq_fops = { 2624static const struct file_operations igmp6_mcf_seq_fops = {
@@ -2593,47 +2626,88 @@ static const struct file_operations igmp6_mcf_seq_fops = {
2593 .open = igmp6_mcf_seq_open, 2626 .open = igmp6_mcf_seq_open,
2594 .read = seq_read, 2627 .read = seq_read,
2595 .llseek = seq_lseek, 2628 .llseek = seq_lseek,
2596 .release = seq_release_private, 2629 .release = seq_release_net,
2597}; 2630};
2631
2632static int igmp6_proc_init(struct net *net)
2633{
2634 int err;
2635
2636 err = -ENOMEM;
2637 if (!proc_net_fops_create(net, "igmp6", S_IRUGO, &igmp6_mc_seq_fops))
2638 goto out;
2639 if (!proc_net_fops_create(net, "mcfilter6", S_IRUGO,
2640 &igmp6_mcf_seq_fops))
2641 goto out_proc_net_igmp6;
2642
2643 err = 0;
2644out:
2645 return err;
2646
2647out_proc_net_igmp6:
2648 proc_net_remove(net, "igmp6");
2649 goto out;
2650}
2651
2652static void igmp6_proc_exit(struct net *net)
2653{
2654 proc_net_remove(net, "mcfilter6");
2655 proc_net_remove(net, "igmp6");
2656}
2657#else
2658static int igmp6_proc_init(struct net *net)
2659{
2660 return 0;
2661}
2662static void igmp6_proc_exit(struct net *net)
2663{
2664 ;
2665}
2598#endif 2666#endif
2599 2667
2600int __init igmp6_init(struct net_proto_family *ops) 2668static int igmp6_net_init(struct net *net)
2601{ 2669{
2602 struct ipv6_pinfo *np;
2603 struct sock *sk;
2604 int err; 2670 int err;
2605 2671
2606 err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, &igmp6_socket); 2672 err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
2673 SOCK_RAW, IPPROTO_ICMPV6, net);
2607 if (err < 0) { 2674 if (err < 0) {
2608 printk(KERN_ERR 2675 printk(KERN_ERR
2609 "Failed to initialize the IGMP6 control socket (err %d).\n", 2676 "Failed to initialize the IGMP6 control socket (err %d).\n",
2610 err); 2677 err);
2611 igmp6_socket = NULL; /* For safety. */ 2678 goto out;
2612 return err;
2613 } 2679 }
2614 2680
2615 sk = igmp6_socket->sk; 2681 inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
2616 sk->sk_allocation = GFP_ATOMIC;
2617 sk->sk_prot->unhash(sk);
2618 2682
2619 np = inet6_sk(sk); 2683 err = igmp6_proc_init(net);
2620 np->hop_limit = 1; 2684 if (err)
2685 goto out_sock_create;
2686out:
2687 return err;
2621 2688
2622#ifdef CONFIG_PROC_FS 2689out_sock_create:
2623 proc_net_fops_create(&init_net, "igmp6", S_IRUGO, &igmp6_mc_seq_fops); 2690 inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2624 proc_net_fops_create(&init_net, "mcfilter6", S_IRUGO, &igmp6_mcf_seq_fops); 2691 goto out;
2625#endif 2692}
2626 2693
2627 return 0; 2694static void igmp6_net_exit(struct net *net)
2695{
2696 inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2697 igmp6_proc_exit(net);
2628} 2698}
2629 2699
2630void igmp6_cleanup(void) 2700static struct pernet_operations igmp6_net_ops = {
2701 .init = igmp6_net_init,
2702 .exit = igmp6_net_exit,
2703};
2704
2705int __init igmp6_init(void)
2631{ 2706{
2632 sock_release(igmp6_socket); 2707 return register_pernet_subsys(&igmp6_net_ops);
2633 igmp6_socket = NULL; /* for safety */ 2708}
2634 2709
2635#ifdef CONFIG_PROC_FS 2710void igmp6_cleanup(void)
2636 proc_net_remove(&init_net, "mcfilter6"); 2711{
2637 proc_net_remove(&init_net, "igmp6"); 2712 unregister_pernet_subsys(&igmp6_net_ops);
2638#endif
2639} 2713}
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index cd8a5bda13cd..ad1cc5bbf977 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -44,9 +44,9 @@ static inline void *mip6_padn(__u8 *data, __u8 padlen)
44 if (!data) 44 if (!data)
45 return NULL; 45 return NULL;
46 if (padlen == 1) { 46 if (padlen == 1) {
47 data[0] = MIP6_OPT_PAD_1; 47 data[0] = IPV6_TLV_PAD0;
48 } else if (padlen > 1) { 48 } else if (padlen > 1) {
49 data[0] = MIP6_OPT_PAD_N; 49 data[0] = IPV6_TLV_PADN;
50 data[1] = padlen - 2; 50 data[1] = padlen - 2;
51 if (padlen > 2) 51 if (padlen > 2)
52 memset(data+2, 0, data[1]); 52 memset(data+2, 0, data[1]);
@@ -304,13 +304,13 @@ static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb,
304static int mip6_destopt_init_state(struct xfrm_state *x) 304static int mip6_destopt_init_state(struct xfrm_state *x)
305{ 305{
306 if (x->id.spi) { 306 if (x->id.spi) {
307 printk(KERN_INFO "%s: spi is not 0: %u\n", __FUNCTION__, 307 printk(KERN_INFO "%s: spi is not 0: %u\n", __func__,
308 x->id.spi); 308 x->id.spi);
309 return -EINVAL; 309 return -EINVAL;
310 } 310 }
311 if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) { 311 if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) {
312 printk(KERN_INFO "%s: state's mode is not %u: %u\n", 312 printk(KERN_INFO "%s: state's mode is not %u: %u\n",
313 __FUNCTION__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode); 313 __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
314 return -EINVAL; 314 return -EINVAL;
315 } 315 }
316 316
@@ -439,13 +439,13 @@ static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb,
439static int mip6_rthdr_init_state(struct xfrm_state *x) 439static int mip6_rthdr_init_state(struct xfrm_state *x)
440{ 440{
441 if (x->id.spi) { 441 if (x->id.spi) {
442 printk(KERN_INFO "%s: spi is not 0: %u\n", __FUNCTION__, 442 printk(KERN_INFO "%s: spi is not 0: %u\n", __func__,
443 x->id.spi); 443 x->id.spi);
444 return -EINVAL; 444 return -EINVAL;
445 } 445 }
446 if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) { 446 if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) {
447 printk(KERN_INFO "%s: state's mode is not %u: %u\n", 447 printk(KERN_INFO "%s: state's mode is not %u: %u\n",
448 __FUNCTION__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode); 448 __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
449 return -EINVAL; 449 return -EINVAL;
450 } 450 }
451 451
@@ -480,15 +480,15 @@ static int __init mip6_init(void)
480 printk(KERN_INFO "Mobile IPv6\n"); 480 printk(KERN_INFO "Mobile IPv6\n");
481 481
482 if (xfrm_register_type(&mip6_destopt_type, AF_INET6) < 0) { 482 if (xfrm_register_type(&mip6_destopt_type, AF_INET6) < 0) {
483 printk(KERN_INFO "%s: can't add xfrm type(destopt)\n", __FUNCTION__); 483 printk(KERN_INFO "%s: can't add xfrm type(destopt)\n", __func__);
484 goto mip6_destopt_xfrm_fail; 484 goto mip6_destopt_xfrm_fail;
485 } 485 }
486 if (xfrm_register_type(&mip6_rthdr_type, AF_INET6) < 0) { 486 if (xfrm_register_type(&mip6_rthdr_type, AF_INET6) < 0) {
487 printk(KERN_INFO "%s: can't add xfrm type(rthdr)\n", __FUNCTION__); 487 printk(KERN_INFO "%s: can't add xfrm type(rthdr)\n", __func__);
488 goto mip6_rthdr_xfrm_fail; 488 goto mip6_rthdr_xfrm_fail;
489 } 489 }
490 if (rawv6_mh_filter_register(mip6_mh_filter) < 0) { 490 if (rawv6_mh_filter_register(mip6_mh_filter) < 0) {
491 printk(KERN_INFO "%s: can't add rawv6 mh filter\n", __FUNCTION__); 491 printk(KERN_INFO "%s: can't add rawv6 mh filter\n", __func__);
492 goto mip6_rawv6_mh_fail; 492 goto mip6_rawv6_mh_fail;
493 } 493 }
494 494
@@ -506,11 +506,11 @@ static int __init mip6_init(void)
506static void __exit mip6_fini(void) 506static void __exit mip6_fini(void)
507{ 507{
508 if (rawv6_mh_filter_unregister(mip6_mh_filter) < 0) 508 if (rawv6_mh_filter_unregister(mip6_mh_filter) < 0)
509 printk(KERN_INFO "%s: can't remove rawv6 mh filter\n", __FUNCTION__); 509 printk(KERN_INFO "%s: can't remove rawv6 mh filter\n", __func__);
510 if (xfrm_unregister_type(&mip6_rthdr_type, AF_INET6) < 0) 510 if (xfrm_unregister_type(&mip6_rthdr_type, AF_INET6) < 0)
511 printk(KERN_INFO "%s: can't remove xfrm type(rthdr)\n", __FUNCTION__); 511 printk(KERN_INFO "%s: can't remove xfrm type(rthdr)\n", __func__);
512 if (xfrm_unregister_type(&mip6_destopt_type, AF_INET6) < 0) 512 if (xfrm_unregister_type(&mip6_destopt_type, AF_INET6) < 0)
513 printk(KERN_INFO "%s: can't remove xfrm type(destopt)\n", __FUNCTION__); 513 printk(KERN_INFO "%s: can't remove xfrm type(destopt)\n", __func__);
514} 514}
515 515
516module_init(mip6_init); 516module_init(mip6_init);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 452a2ac4eec8..2c74885f8355 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -84,13 +84,12 @@
84 84
85#include <net/flow.h> 85#include <net/flow.h>
86#include <net/ip6_checksum.h> 86#include <net/ip6_checksum.h>
87#include <net/inet_common.h>
87#include <linux/proc_fs.h> 88#include <linux/proc_fs.h>
88 89
89#include <linux/netfilter.h> 90#include <linux/netfilter.h>
90#include <linux/netfilter_ipv6.h> 91#include <linux/netfilter_ipv6.h>
91 92
92static struct socket *ndisc_socket;
93
94static u32 ndisc_hash(const void *pkey, const struct net_device *dev); 93static u32 ndisc_hash(const void *pkey, const struct net_device *dev);
95static int ndisc_constructor(struct neighbour *neigh); 94static int ndisc_constructor(struct neighbour *neigh);
96static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb); 95static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb);
@@ -270,7 +269,7 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
270 if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) { 269 if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) {
271 ND_PRINTK2(KERN_WARNING 270 ND_PRINTK2(KERN_WARNING
272 "%s(): duplicated ND6 option found: type=%d\n", 271 "%s(): duplicated ND6 option found: type=%d\n",
273 __FUNCTION__, 272 __func__,
274 nd_opt->nd_opt_type); 273 nd_opt->nd_opt_type);
275 } else { 274 } else {
276 ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt; 275 ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt;
@@ -301,7 +300,7 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
301 */ 300 */
302 ND_PRINTK2(KERN_NOTICE 301 ND_PRINTK2(KERN_NOTICE
303 "%s(): ignored unsupported option; type=%d, len=%d\n", 302 "%s(): ignored unsupported option; type=%d, len=%d\n",
304 __FUNCTION__, 303 __func__,
305 nd_opt->nd_opt_type, nd_opt->nd_opt_len); 304 nd_opt->nd_opt_type, nd_opt->nd_opt_len);
306 } 305 }
307 } 306 }
@@ -441,30 +440,17 @@ static void pndisc_destructor(struct pneigh_entry *n)
441/* 440/*
442 * Send a Neighbour Advertisement 441 * Send a Neighbour Advertisement
443 */ 442 */
444
445static inline void ndisc_flow_init(struct flowi *fl, u8 type,
446 struct in6_addr *saddr, struct in6_addr *daddr,
447 int oif)
448{
449 memset(fl, 0, sizeof(*fl));
450 ipv6_addr_copy(&fl->fl6_src, saddr);
451 ipv6_addr_copy(&fl->fl6_dst, daddr);
452 fl->proto = IPPROTO_ICMPV6;
453 fl->fl_icmp_type = type;
454 fl->fl_icmp_code = 0;
455 fl->oif = oif;
456 security_sk_classify_flow(ndisc_socket->sk, fl);
457}
458
459static void __ndisc_send(struct net_device *dev, 443static void __ndisc_send(struct net_device *dev,
460 struct neighbour *neigh, 444 struct neighbour *neigh,
461 struct in6_addr *daddr, struct in6_addr *saddr, 445 const struct in6_addr *daddr,
462 struct icmp6hdr *icmp6h, struct in6_addr *target, 446 const struct in6_addr *saddr,
447 struct icmp6hdr *icmp6h, const struct in6_addr *target,
463 int llinfo) 448 int llinfo)
464{ 449{
465 struct flowi fl; 450 struct flowi fl;
466 struct dst_entry *dst; 451 struct dst_entry *dst;
467 struct sock *sk = ndisc_socket->sk; 452 struct net *net = dev_net(dev);
453 struct sock *sk = net->ipv6.ndisc_sk;
468 struct sk_buff *skb; 454 struct sk_buff *skb;
469 struct icmp6hdr *hdr; 455 struct icmp6hdr *hdr;
470 struct inet6_dev *idev; 456 struct inet6_dev *idev;
@@ -474,10 +460,9 @@ static void __ndisc_send(struct net_device *dev,
474 460
475 type = icmp6h->icmp6_type; 461 type = icmp6h->icmp6_type;
476 462
477 ndisc_flow_init(&fl, type, saddr, daddr, 463 icmpv6_flow_init(sk, &fl, type, saddr, daddr, dev->ifindex);
478 dev->ifindex);
479 464
480 dst = ndisc_dst_alloc(dev, neigh, daddr, ip6_output); 465 dst = icmp6_dst_alloc(dev, neigh, daddr);
481 if (!dst) 466 if (!dst)
482 return; 467 return;
483 468
@@ -499,7 +484,7 @@ static void __ndisc_send(struct net_device *dev,
499 if (!skb) { 484 if (!skb) {
500 ND_PRINTK0(KERN_ERR 485 ND_PRINTK0(KERN_ERR
501 "ICMPv6 ND: %s() failed to allocate an skb.\n", 486 "ICMPv6 ND: %s() failed to allocate an skb.\n",
502 __FUNCTION__); 487 __func__);
503 dst_release(dst); 488 dst_release(dst);
504 return; 489 return;
505 } 490 }
@@ -545,25 +530,28 @@ static void __ndisc_send(struct net_device *dev,
545} 530}
546 531
547static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh, 532static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
548 struct in6_addr *daddr, struct in6_addr *solicited_addr, 533 const struct in6_addr *daddr,
549 int router, int solicited, int override, int inc_opt) 534 const struct in6_addr *solicited_addr,
535 int router, int solicited, int override, int inc_opt)
550{ 536{
551 struct in6_addr tmpaddr; 537 struct in6_addr tmpaddr;
552 struct inet6_ifaddr *ifp; 538 struct inet6_ifaddr *ifp;
553 struct in6_addr *src_addr; 539 const struct in6_addr *src_addr;
554 struct icmp6hdr icmp6h = { 540 struct icmp6hdr icmp6h = {
555 .icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT, 541 .icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT,
556 }; 542 };
557 543
558 /* for anycast or proxy, solicited_addr != src_addr */ 544 /* for anycast or proxy, solicited_addr != src_addr */
559 ifp = ipv6_get_ifaddr(&init_net, solicited_addr, dev, 1); 545 ifp = ipv6_get_ifaddr(dev_net(dev), solicited_addr, dev, 1);
560 if (ifp) { 546 if (ifp) {
561 src_addr = solicited_addr; 547 src_addr = solicited_addr;
562 if (ifp->flags & IFA_F_OPTIMISTIC) 548 if (ifp->flags & IFA_F_OPTIMISTIC)
563 override = 0; 549 override = 0;
564 in6_ifa_put(ifp); 550 in6_ifa_put(ifp);
565 } else { 551 } else {
566 if (ipv6_dev_get_saddr(dev, daddr, &tmpaddr)) 552 if (ipv6_dev_get_saddr(dev, daddr,
553 inet6_sk(dev_net(dev)->ipv6.ndisc_sk)->srcprefs,
554 &tmpaddr))
567 return; 555 return;
568 src_addr = &tmpaddr; 556 src_addr = &tmpaddr;
569 } 557 }
@@ -578,8 +566,8 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
578} 566}
579 567
580void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh, 568void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
581 struct in6_addr *solicit, 569 const struct in6_addr *solicit,
582 struct in6_addr *daddr, struct in6_addr *saddr) 570 const struct in6_addr *daddr, const struct in6_addr *saddr)
583{ 571{
584 struct in6_addr addr_buf; 572 struct in6_addr addr_buf;
585 struct icmp6hdr icmp6h = { 573 struct icmp6hdr icmp6h = {
@@ -598,8 +586,8 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
598 !ipv6_addr_any(saddr) ? ND_OPT_SOURCE_LL_ADDR : 0); 586 !ipv6_addr_any(saddr) ? ND_OPT_SOURCE_LL_ADDR : 0);
599} 587}
600 588
601void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr, 589void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr,
602 struct in6_addr *daddr) 590 const struct in6_addr *daddr)
603{ 591{
604 struct icmp6hdr icmp6h = { 592 struct icmp6hdr icmp6h = {
605 .icmp6_type = NDISC_ROUTER_SOLICITATION, 593 .icmp6_type = NDISC_ROUTER_SOLICITATION,
@@ -616,7 +604,7 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr,
616 * suppress the inclusion of the sllao. 604 * suppress the inclusion of the sllao.
617 */ 605 */
618 if (send_sllao) { 606 if (send_sllao) {
619 struct inet6_ifaddr *ifp = ipv6_get_ifaddr(&init_net, saddr, 607 struct inet6_ifaddr *ifp = ipv6_get_ifaddr(dev_net(dev), saddr,
620 dev, 1); 608 dev, 1);
621 if (ifp) { 609 if (ifp) {
622 if (ifp->flags & IFA_F_OPTIMISTIC) { 610 if (ifp->flags & IFA_F_OPTIMISTIC) {
@@ -654,7 +642,7 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
654 struct in6_addr *target = (struct in6_addr *)&neigh->primary_key; 642 struct in6_addr *target = (struct in6_addr *)&neigh->primary_key;
655 int probes = atomic_read(&neigh->probes); 643 int probes = atomic_read(&neigh->probes);
656 644
657 if (skb && ipv6_chk_addr(&init_net, &ipv6_hdr(skb)->saddr, dev, 1)) 645 if (skb && ipv6_chk_addr(dev_net(dev), &ipv6_hdr(skb)->saddr, dev, 1))
658 saddr = &ipv6_hdr(skb)->saddr; 646 saddr = &ipv6_hdr(skb)->saddr;
659 647
660 if ((probes -= neigh->parms->ucast_probes) < 0) { 648 if ((probes -= neigh->parms->ucast_probes) < 0) {
@@ -662,7 +650,7 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
662 ND_PRINTK1(KERN_DEBUG 650 ND_PRINTK1(KERN_DEBUG
663 "%s(): trying to ucast probe in NUD_INVALID: " 651 "%s(): trying to ucast probe in NUD_INVALID: "
664 NIP6_FMT "\n", 652 NIP6_FMT "\n",
665 __FUNCTION__, 653 __func__,
666 NIP6(*target)); 654 NIP6(*target));
667 } 655 }
668 ndisc_send_ns(dev, neigh, target, target, saddr); 656 ndisc_send_ns(dev, neigh, target, target, saddr);
@@ -676,18 +664,19 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
676 } 664 }
677} 665}
678 666
679static struct pneigh_entry *pndisc_check_router(struct net_device *dev, 667static int pndisc_is_router(const void *pkey,
680 struct in6_addr *addr, int *is_router) 668 struct net_device *dev)
681{ 669{
682 struct pneigh_entry *n; 670 struct pneigh_entry *n;
671 int ret = -1;
683 672
684 read_lock_bh(&nd_tbl.lock); 673 read_lock_bh(&nd_tbl.lock);
685 n = __pneigh_lookup(&nd_tbl, &init_net, addr, dev); 674 n = __pneigh_lookup(&nd_tbl, dev_net(dev), pkey, dev);
686 if (n != NULL) 675 if (n)
687 *is_router = (n->flags & NTF_ROUTER); 676 ret = !!(n->flags & NTF_ROUTER);
688 read_unlock_bh(&nd_tbl.lock); 677 read_unlock_bh(&nd_tbl.lock);
689 678
690 return n; 679 return ret;
691} 680}
692 681
693static void ndisc_recv_ns(struct sk_buff *skb) 682static void ndisc_recv_ns(struct sk_buff *skb)
@@ -703,10 +692,9 @@ static void ndisc_recv_ns(struct sk_buff *skb)
703 struct inet6_ifaddr *ifp; 692 struct inet6_ifaddr *ifp;
704 struct inet6_dev *idev = NULL; 693 struct inet6_dev *idev = NULL;
705 struct neighbour *neigh; 694 struct neighbour *neigh;
706 struct pneigh_entry *pneigh = NULL;
707 int dad = ipv6_addr_any(saddr); 695 int dad = ipv6_addr_any(saddr);
708 int inc; 696 int inc;
709 int is_router = 0; 697 int is_router = -1;
710 698
711 if (ipv6_addr_is_multicast(&msg->target)) { 699 if (ipv6_addr_is_multicast(&msg->target)) {
712 ND_PRINTK2(KERN_WARNING 700 ND_PRINTK2(KERN_WARNING
@@ -756,7 +744,8 @@ static void ndisc_recv_ns(struct sk_buff *skb)
756 744
757 inc = ipv6_addr_is_multicast(daddr); 745 inc = ipv6_addr_is_multicast(daddr);
758 746
759 if ((ifp = ipv6_get_ifaddr(&init_net, &msg->target, dev, 1)) != NULL) { 747 ifp = ipv6_get_ifaddr(dev_net(dev), &msg->target, dev, 1);
748 if (ifp) {
760 749
761 if (ifp->flags & (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)) { 750 if (ifp->flags & (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)) {
762 if (dad) { 751 if (dad) {
@@ -801,11 +790,10 @@ static void ndisc_recv_ns(struct sk_buff *skb)
801 return; 790 return;
802 } 791 }
803 792
804 if (ipv6_chk_acast_addr(dev, &msg->target) || 793 if (ipv6_chk_acast_addr(dev_net(dev), dev, &msg->target) ||
805 (idev->cnf.forwarding && 794 (idev->cnf.forwarding &&
806 (ipv6_devconf.proxy_ndp || idev->cnf.proxy_ndp) && 795 (ipv6_devconf.proxy_ndp || idev->cnf.proxy_ndp) &&
807 (pneigh = pndisc_check_router(dev, &msg->target, 796 (is_router = pndisc_is_router(&msg->target, dev)) >= 0)) {
808 &is_router)) != NULL)) {
809 if (!(NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED) && 797 if (!(NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED) &&
810 skb->pkt_type != PACKET_HOST && 798 skb->pkt_type != PACKET_HOST &&
811 inc != 0 && 799 inc != 0 &&
@@ -826,13 +814,11 @@ static void ndisc_recv_ns(struct sk_buff *skb)
826 goto out; 814 goto out;
827 } 815 }
828 816
829 is_router = !!(pneigh ? is_router : idev->cnf.forwarding); 817 if (is_router < 0)
818 is_router = !!idev->cnf.forwarding;
830 819
831 if (dad) { 820 if (dad) {
832 struct in6_addr maddr; 821 ndisc_send_na(dev, NULL, &in6addr_linklocal_allnodes, &msg->target,
833
834 ipv6_addr_all_nodes(&maddr);
835 ndisc_send_na(dev, NULL, &maddr, &msg->target,
836 is_router, 0, (ifp != NULL), 1); 822 is_router, 0, (ifp != NULL), 1);
837 goto out; 823 goto out;
838 } 824 }
@@ -914,7 +900,8 @@ static void ndisc_recv_na(struct sk_buff *skb)
914 return; 900 return;
915 } 901 }
916 } 902 }
917 if ((ifp = ipv6_get_ifaddr(&init_net, &msg->target, dev, 1))) { 903 ifp = ipv6_get_ifaddr(dev_net(dev), &msg->target, dev, 1);
904 if (ifp) {
918 if (ifp->flags & IFA_F_TENTATIVE) { 905 if (ifp->flags & IFA_F_TENTATIVE) {
919 addrconf_dad_failure(ifp); 906 addrconf_dad_failure(ifp);
920 return; 907 return;
@@ -945,7 +932,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
945 */ 932 */
946 if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) && 933 if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) &&
947 ipv6_devconf.forwarding && ipv6_devconf.proxy_ndp && 934 ipv6_devconf.forwarding && ipv6_devconf.proxy_ndp &&
948 pneigh_lookup(&nd_tbl, &init_net, &msg->target, dev, 0)) { 935 pneigh_lookup(&nd_tbl, dev_net(dev), &msg->target, dev, 0)) {
949 /* XXX: idev->cnf.prixy_ndp */ 936 /* XXX: idev->cnf.prixy_ndp */
950 goto out; 937 goto out;
951 } 938 }
@@ -1035,6 +1022,7 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
1035 struct sk_buff *skb; 1022 struct sk_buff *skb;
1036 struct nlmsghdr *nlh; 1023 struct nlmsghdr *nlh;
1037 struct nduseroptmsg *ndmsg; 1024 struct nduseroptmsg *ndmsg;
1025 struct net *net = dev_net(ra->dev);
1038 int err; 1026 int err;
1039 int base_size = NLMSG_ALIGN(sizeof(struct nduseroptmsg) 1027 int base_size = NLMSG_ALIGN(sizeof(struct nduseroptmsg)
1040 + (opt->nd_opt_len << 3)); 1028 + (opt->nd_opt_len << 3));
@@ -1064,7 +1052,7 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
1064 &ipv6_hdr(ra)->saddr); 1052 &ipv6_hdr(ra)->saddr);
1065 nlmsg_end(skb, nlh); 1053 nlmsg_end(skb, nlh);
1066 1054
1067 err = rtnl_notify(skb, &init_net, 0, RTNLGRP_ND_USEROPT, NULL, 1055 err = rtnl_notify(skb, net, 0, RTNLGRP_ND_USEROPT, NULL,
1068 GFP_ATOMIC); 1056 GFP_ATOMIC);
1069 if (err < 0) 1057 if (err < 0)
1070 goto errout; 1058 goto errout;
@@ -1075,7 +1063,7 @@ nla_put_failure:
1075 nlmsg_free(skb); 1063 nlmsg_free(skb);
1076 err = -EMSGSIZE; 1064 err = -EMSGSIZE;
1077errout: 1065errout:
1078 rtnl_set_sk_err(&init_net, RTNLGRP_ND_USEROPT, err); 1066 rtnl_set_sk_err(net, RTNLGRP_ND_USEROPT, err);
1079} 1067}
1080 1068
1081static void ndisc_router_discovery(struct sk_buff *skb) 1069static void ndisc_router_discovery(struct sk_buff *skb)
@@ -1104,6 +1092,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1104 return; 1092 return;
1105 } 1093 }
1106 1094
1095#ifdef CONFIG_IPV6_NDISC_NODETYPE
1096 if (skb->ndisc_nodetype == NDISC_NODETYPE_HOST) {
1097 ND_PRINTK2(KERN_WARNING
1098 "ICMPv6 RA: from host or unauthorized router\n");
1099 return;
1100 }
1101#endif
1102
1107 /* 1103 /*
1108 * set the RA_RECV flag in the interface 1104 * set the RA_RECV flag in the interface
1109 */ 1105 */
@@ -1127,6 +1123,12 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1127 return; 1123 return;
1128 } 1124 }
1129 1125
1126#ifdef CONFIG_IPV6_NDISC_NODETYPE
1127 /* skip link-specific parameters from interior routers */
1128 if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT)
1129 goto skip_linkparms;
1130#endif
1131
1130 if (in6_dev->if_flags & IF_RS_SENT) { 1132 if (in6_dev->if_flags & IF_RS_SENT) {
1131 /* 1133 /*
1132 * flag that an RA was received after an RS was sent 1134 * flag that an RA was received after an RS was sent
@@ -1178,7 +1180,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1178 if (rt == NULL) { 1180 if (rt == NULL) {
1179 ND_PRINTK0(KERN_ERR 1181 ND_PRINTK0(KERN_ERR
1180 "ICMPv6 RA: %s() failed to add default route.\n", 1182 "ICMPv6 RA: %s() failed to add default route.\n",
1181 __FUNCTION__); 1183 __func__);
1182 in6_dev_put(in6_dev); 1184 in6_dev_put(in6_dev);
1183 return; 1185 return;
1184 } 1186 }
@@ -1187,7 +1189,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1187 if (neigh == NULL) { 1189 if (neigh == NULL) {
1188 ND_PRINTK0(KERN_ERR 1190 ND_PRINTK0(KERN_ERR
1189 "ICMPv6 RA: %s() got default router without neighbour.\n", 1191 "ICMPv6 RA: %s() got default router without neighbour.\n",
1190 __FUNCTION__); 1192 __func__);
1191 dst_release(&rt->u.dst); 1193 dst_release(&rt->u.dst);
1192 in6_dev_put(in6_dev); 1194 in6_dev_put(in6_dev);
1193 return; 1195 return;
@@ -1241,6 +1243,10 @@ skip_defrtr:
1241 } 1243 }
1242 } 1244 }
1243 1245
1246#ifdef CONFIG_IPV6_NDISC_NODETYPE
1247skip_linkparms:
1248#endif
1249
1244 /* 1250 /*
1245 * Process options. 1251 * Process options.
1246 */ 1252 */
@@ -1272,7 +1278,13 @@ skip_defrtr:
1272 for (p = ndopts.nd_opts_ri; 1278 for (p = ndopts.nd_opts_ri;
1273 p; 1279 p;
1274 p = ndisc_next_option(p, ndopts.nd_opts_ri_end)) { 1280 p = ndisc_next_option(p, ndopts.nd_opts_ri_end)) {
1275 if (((struct route_info *)p)->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen) 1281 struct route_info *ri = (struct route_info *)p;
1282#ifdef CONFIG_IPV6_NDISC_NODETYPE
1283 if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT &&
1284 ri->prefix_len == 0)
1285 continue;
1286#endif
1287 if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
1276 continue; 1288 continue;
1277 rt6_route_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3, 1289 rt6_route_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3,
1278 &ipv6_hdr(skb)->saddr); 1290 &ipv6_hdr(skb)->saddr);
@@ -1280,6 +1292,12 @@ skip_defrtr:
1280 } 1292 }
1281#endif 1293#endif
1282 1294
1295#ifdef CONFIG_IPV6_NDISC_NODETYPE
1296 /* skip link-specific ndopts from interior routers */
1297 if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT)
1298 goto out;
1299#endif
1300
1283 if (in6_dev->cnf.accept_ra_pinfo && ndopts.nd_opts_pi) { 1301 if (in6_dev->cnf.accept_ra_pinfo && ndopts.nd_opts_pi) {
1284 struct nd_opt_hdr *p; 1302 struct nd_opt_hdr *p;
1285 for (p = ndopts.nd_opts_pi; 1303 for (p = ndopts.nd_opts_pi;
@@ -1343,6 +1361,16 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1343 int optlen; 1361 int optlen;
1344 u8 *lladdr = NULL; 1362 u8 *lladdr = NULL;
1345 1363
1364#ifdef CONFIG_IPV6_NDISC_NODETYPE
1365 switch (skb->ndisc_nodetype) {
1366 case NDISC_NODETYPE_HOST:
1367 case NDISC_NODETYPE_NODEFAULT:
1368 ND_PRINTK2(KERN_WARNING
1369 "ICMPv6 Redirect: from host or unauthorized router\n");
1370 return;
1371 }
1372#endif
1373
1346 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) { 1374 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
1347 ND_PRINTK2(KERN_WARNING 1375 ND_PRINTK2(KERN_WARNING
1348 "ICMPv6 Redirect: source address is not link-local.\n"); 1376 "ICMPv6 Redirect: source address is not link-local.\n");
@@ -1418,15 +1446,16 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1418} 1446}
1419 1447
1420void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, 1448void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1421 struct in6_addr *target) 1449 const struct in6_addr *target)
1422{ 1450{
1423 struct sock *sk = ndisc_socket->sk; 1451 struct net_device *dev = skb->dev;
1452 struct net *net = dev_net(dev);
1453 struct sock *sk = net->ipv6.ndisc_sk;
1424 int len = sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr); 1454 int len = sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
1425 struct sk_buff *buff; 1455 struct sk_buff *buff;
1426 struct icmp6hdr *icmph; 1456 struct icmp6hdr *icmph;
1427 struct in6_addr saddr_buf; 1457 struct in6_addr saddr_buf;
1428 struct in6_addr *addrp; 1458 struct in6_addr *addrp;
1429 struct net_device *dev;
1430 struct rt6_info *rt; 1459 struct rt6_info *rt;
1431 struct dst_entry *dst; 1460 struct dst_entry *dst;
1432 struct inet6_dev *idev; 1461 struct inet6_dev *idev;
@@ -1436,8 +1465,6 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1436 int err; 1465 int err;
1437 u8 ha_buf[MAX_ADDR_LEN], *ha = NULL; 1466 u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
1438 1467
1439 dev = skb->dev;
1440
1441 if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) { 1468 if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
1442 ND_PRINTK2(KERN_WARNING 1469 ND_PRINTK2(KERN_WARNING
1443 "ICMPv6 Redirect: no link-local address on %s\n", 1470 "ICMPv6 Redirect: no link-local address on %s\n",
@@ -1452,10 +1479,10 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1452 return; 1479 return;
1453 } 1480 }
1454 1481
1455 ndisc_flow_init(&fl, NDISC_REDIRECT, &saddr_buf, &ipv6_hdr(skb)->saddr, 1482 icmpv6_flow_init(sk, &fl, NDISC_REDIRECT,
1456 dev->ifindex); 1483 &saddr_buf, &ipv6_hdr(skb)->saddr, dev->ifindex);
1457 1484
1458 dst = ip6_route_output(NULL, &fl); 1485 dst = ip6_route_output(net, NULL, &fl);
1459 if (dst == NULL) 1486 if (dst == NULL)
1460 return; 1487 return;
1461 1488
@@ -1499,12 +1526,11 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1499 if (buff == NULL) { 1526 if (buff == NULL) {
1500 ND_PRINTK0(KERN_ERR 1527 ND_PRINTK0(KERN_ERR
1501 "ICMPv6 Redirect: %s() failed to allocate an skb.\n", 1528 "ICMPv6 Redirect: %s() failed to allocate an skb.\n",
1502 __FUNCTION__); 1529 __func__);
1503 dst_release(dst); 1530 dst_release(dst);
1504 return; 1531 return;
1505 } 1532 }
1506 1533
1507
1508 skb_reserve(buff, LL_RESERVED_SPACE(dev)); 1534 skb_reserve(buff, LL_RESERVED_SPACE(dev));
1509 ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr, 1535 ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr,
1510 IPPROTO_ICMPV6, len); 1536 IPPROTO_ICMPV6, len);
@@ -1625,18 +1651,16 @@ int ndisc_rcv(struct sk_buff *skb)
1625static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) 1651static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
1626{ 1652{
1627 struct net_device *dev = ptr; 1653 struct net_device *dev = ptr;
1628 1654 struct net *net = dev_net(dev);
1629 if (dev->nd_net != &init_net)
1630 return NOTIFY_DONE;
1631 1655
1632 switch (event) { 1656 switch (event) {
1633 case NETDEV_CHANGEADDR: 1657 case NETDEV_CHANGEADDR:
1634 neigh_changeaddr(&nd_tbl, dev); 1658 neigh_changeaddr(&nd_tbl, dev);
1635 fib6_run_gc(~0UL); 1659 fib6_run_gc(~0UL, net);
1636 break; 1660 break;
1637 case NETDEV_DOWN: 1661 case NETDEV_DOWN:
1638 neigh_ifdown(&nd_tbl, dev); 1662 neigh_ifdown(&nd_tbl, dev);
1639 fib6_run_gc(~0UL); 1663 fib6_run_gc(~0UL, net);
1640 break; 1664 break;
1641 default: 1665 default:
1642 break; 1666 break;
@@ -1745,44 +1769,74 @@ static int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl, int __user *name,
1745 1769
1746#endif 1770#endif
1747 1771
1748int __init ndisc_init(struct net_proto_family *ops) 1772static int ndisc_net_init(struct net *net)
1749{ 1773{
1750 struct ipv6_pinfo *np; 1774 struct ipv6_pinfo *np;
1751 struct sock *sk; 1775 struct sock *sk;
1752 int err; 1776 int err;
1753 1777
1754 err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, &ndisc_socket); 1778 err = inet_ctl_sock_create(&sk, PF_INET6,
1779 SOCK_RAW, IPPROTO_ICMPV6, net);
1755 if (err < 0) { 1780 if (err < 0) {
1756 ND_PRINTK0(KERN_ERR 1781 ND_PRINTK0(KERN_ERR
1757 "ICMPv6 NDISC: Failed to initialize the control socket (err %d).\n", 1782 "ICMPv6 NDISC: Failed to initialize the control socket (err %d).\n",
1758 err); 1783 err);
1759 ndisc_socket = NULL; /* For safety. */
1760 return err; 1784 return err;
1761 } 1785 }
1762 1786
1763 sk = ndisc_socket->sk; 1787 net->ipv6.ndisc_sk = sk;
1788
1764 np = inet6_sk(sk); 1789 np = inet6_sk(sk);
1765 sk->sk_allocation = GFP_ATOMIC;
1766 np->hop_limit = 255; 1790 np->hop_limit = 255;
1767 /* Do not loopback ndisc messages */ 1791 /* Do not loopback ndisc messages */
1768 np->mc_loop = 0; 1792 np->mc_loop = 0;
1769 sk->sk_prot->unhash(sk);
1770 1793
1794 return 0;
1795}
1796
1797static void ndisc_net_exit(struct net *net)
1798{
1799 inet_ctl_sock_destroy(net->ipv6.ndisc_sk);
1800}
1801
1802static struct pernet_operations ndisc_net_ops = {
1803 .init = ndisc_net_init,
1804 .exit = ndisc_net_exit,
1805};
1806
1807int __init ndisc_init(void)
1808{
1809 int err;
1810
1811 err = register_pernet_subsys(&ndisc_net_ops);
1812 if (err)
1813 return err;
1771 /* 1814 /*
1772 * Initialize the neighbour table 1815 * Initialize the neighbour table
1773 */ 1816 */
1774
1775 neigh_table_init(&nd_tbl); 1817 neigh_table_init(&nd_tbl);
1776 1818
1777#ifdef CONFIG_SYSCTL 1819#ifdef CONFIG_SYSCTL
1778 neigh_sysctl_register(NULL, &nd_tbl.parms, NET_IPV6, NET_IPV6_NEIGH, 1820 err = neigh_sysctl_register(NULL, &nd_tbl.parms, NET_IPV6,
1779 "ipv6", 1821 NET_IPV6_NEIGH, "ipv6",
1780 &ndisc_ifinfo_sysctl_change, 1822 &ndisc_ifinfo_sysctl_change,
1781 &ndisc_ifinfo_sysctl_strategy); 1823 &ndisc_ifinfo_sysctl_strategy);
1824 if (err)
1825 goto out_unregister_pernet;
1782#endif 1826#endif
1827 err = register_netdevice_notifier(&ndisc_netdev_notifier);
1828 if (err)
1829 goto out_unregister_sysctl;
1830out:
1831 return err;
1783 1832
1784 register_netdevice_notifier(&ndisc_netdev_notifier); 1833out_unregister_sysctl:
1785 return 0; 1834#ifdef CONFIG_SYSCTL
1835 neigh_sysctl_unregister(&nd_tbl.parms);
1836out_unregister_pernet:
1837#endif
1838 unregister_pernet_subsys(&ndisc_net_ops);
1839 goto out;
1786} 1840}
1787 1841
1788void ndisc_cleanup(void) 1842void ndisc_cleanup(void)
@@ -1792,6 +1846,5 @@ void ndisc_cleanup(void)
1792 neigh_sysctl_unregister(&nd_tbl.parms); 1846 neigh_sysctl_unregister(&nd_tbl.parms);
1793#endif 1847#endif
1794 neigh_table_clear(&nd_tbl); 1848 neigh_table_clear(&nd_tbl);
1795 sock_release(ndisc_socket); 1849 unregister_pernet_subsys(&ndisc_net_ops);
1796 ndisc_socket = NULL; /* For safety. */
1797} 1850}
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 2e06724dc348..8c6c5e71f210 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -23,7 +23,7 @@ int ip6_route_me_harder(struct sk_buff *skb)
23 .saddr = iph->saddr, } }, 23 .saddr = iph->saddr, } },
24 }; 24 };
25 25
26 dst = ip6_route_output(skb->sk, &fl); 26 dst = ip6_route_output(&init_net, skb->sk, &fl);
27 27
28#ifdef CONFIG_XFRM 28#ifdef CONFIG_XFRM
29 if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && 29 if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
@@ -86,7 +86,7 @@ static int nf_ip6_reroute(struct sk_buff *skb,
86 86
87static int nf_ip6_route(struct dst_entry **dst, struct flowi *fl) 87static int nf_ip6_route(struct dst_entry **dst, struct flowi *fl)
88{ 88{
89 *dst = ip6_route_output(NULL, fl); 89 *dst = ip6_route_output(&init_net, NULL, fl);
90 return (*dst)->error; 90 return (*dst)->error;
91} 91}
92 92
@@ -121,16 +121,44 @@ __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
121 } 121 }
122 return csum; 122 return csum;
123} 123}
124
125EXPORT_SYMBOL(nf_ip6_checksum); 124EXPORT_SYMBOL(nf_ip6_checksum);
126 125
126static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
127 unsigned int dataoff, unsigned int len,
128 u_int8_t protocol)
129{
130 struct ipv6hdr *ip6h = ipv6_hdr(skb);
131 __wsum hsum;
132 __sum16 csum = 0;
133
134 switch (skb->ip_summed) {
135 case CHECKSUM_COMPLETE:
136 if (len == skb->len - dataoff)
137 return nf_ip6_checksum(skb, hook, dataoff, protocol);
138 /* fall through */
139 case CHECKSUM_NONE:
140 hsum = skb_checksum(skb, 0, dataoff, 0);
141 skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
142 &ip6h->daddr,
143 skb->len - dataoff,
144 protocol,
145 csum_sub(0, hsum)));
146 skb->ip_summed = CHECKSUM_NONE;
147 csum = __skb_checksum_complete_head(skb, dataoff + len);
148 if (!csum)
149 skb->ip_summed = CHECKSUM_UNNECESSARY;
150 }
151 return csum;
152};
153
127static const struct nf_afinfo nf_ip6_afinfo = { 154static const struct nf_afinfo nf_ip6_afinfo = {
128 .family = AF_INET6, 155 .family = AF_INET6,
129 .checksum = nf_ip6_checksum, 156 .checksum = nf_ip6_checksum,
130 .route = nf_ip6_route, 157 .checksum_partial = nf_ip6_checksum_partial,
131 .saveroute = nf_ip6_saveroute, 158 .route = nf_ip6_route,
132 .reroute = nf_ip6_reroute, 159 .saveroute = nf_ip6_saveroute,
133 .route_key_size = sizeof(struct ip6_rt_info), 160 .reroute = nf_ip6_reroute,
161 .route_key_size = sizeof(struct ip6_rt_info),
134}; 162};
135 163
136int __init ipv6_netfilter_init(void) 164int __init ipv6_netfilter_init(void)
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 8d366f7f2a9a..92a36c9e5402 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -484,7 +484,7 @@ ipq_rcv_dev_event(struct notifier_block *this,
484{ 484{
485 struct net_device *dev = ptr; 485 struct net_device *dev = ptr;
486 486
487 if (dev->nd_net != &init_net) 487 if (dev_net(dev) != &init_net)
488 return NOTIFY_DONE; 488 return NOTIFY_DONE;
489 489
490 /* Drop any packets associated with the downed device */ 490 /* Drop any packets associated with the downed device */
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index bf9bb6e55bb5..0b4557e03431 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -55,7 +55,7 @@ MODULE_DESCRIPTION("IPv6 packet filter");
55do { \ 55do { \
56 if (!(x)) \ 56 if (!(x)) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \ 57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __FUNCTION__, __FILE__, __LINE__); \ 58 __func__, __FILE__, __LINE__); \
59} while(0) 59} while(0)
60#else 60#else
61#define IP_NF_ASSERT(x) 61#define IP_NF_ASSERT(x)
@@ -325,7 +325,7 @@ static void trace_packet(struct sk_buff *skb,
325 struct ip6t_entry *e) 325 struct ip6t_entry *e)
326{ 326{
327 void *table_base; 327 void *table_base;
328 struct ip6t_entry *root; 328 const struct ip6t_entry *root;
329 char *hookname, *chainname, *comment; 329 char *hookname, *chainname, *comment;
330 unsigned int rulenum = 0; 330 unsigned int rulenum = 0;
331 331
@@ -952,7 +952,7 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
952{ 952{
953 unsigned int countersize; 953 unsigned int countersize;
954 struct xt_counters *counters; 954 struct xt_counters *counters;
955 struct xt_table_info *private = table->private; 955 const struct xt_table_info *private = table->private;
956 956
957 /* We need atomic snapshot of counters: rest doesn't change 957 /* We need atomic snapshot of counters: rest doesn't change
958 (other than comefrom, which userspace doesn't care 958 (other than comefrom, which userspace doesn't care
@@ -979,9 +979,9 @@ copy_entries_to_user(unsigned int total_size,
979 unsigned int off, num; 979 unsigned int off, num;
980 struct ip6t_entry *e; 980 struct ip6t_entry *e;
981 struct xt_counters *counters; 981 struct xt_counters *counters;
982 struct xt_table_info *private = table->private; 982 const struct xt_table_info *private = table->private;
983 int ret = 0; 983 int ret = 0;
984 void *loc_cpu_entry; 984 const void *loc_cpu_entry;
985 985
986 counters = alloc_counters(table); 986 counters = alloc_counters(table);
987 if (IS_ERR(counters)) 987 if (IS_ERR(counters))
@@ -1001,8 +1001,8 @@ copy_entries_to_user(unsigned int total_size,
1001 /* ... then go back and fix counters and names */ 1001 /* ... then go back and fix counters and names */
1002 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ 1002 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1003 unsigned int i; 1003 unsigned int i;
1004 struct ip6t_entry_match *m; 1004 const struct ip6t_entry_match *m;
1005 struct ip6t_entry_target *t; 1005 const struct ip6t_entry_target *t;
1006 1006
1007 e = (struct ip6t_entry *)(loc_cpu_entry + off); 1007 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1008 if (copy_to_user(userptr + off 1008 if (copy_to_user(userptr + off
@@ -1142,7 +1142,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
1142 "ip6table_%s", name); 1142 "ip6table_%s", name);
1143 if (t && !IS_ERR(t)) { 1143 if (t && !IS_ERR(t)) {
1144 struct ip6t_getinfo info; 1144 struct ip6t_getinfo info;
1145 struct xt_table_info *private = t->private; 1145 const struct xt_table_info *private = t->private;
1146 1146
1147#ifdef CONFIG_COMPAT 1147#ifdef CONFIG_COMPAT
1148 if (compat) { 1148 if (compat) {
@@ -1206,7 +1206,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1206 else { 1206 else {
1207 duprintf("get_entries: I've got %u not %u!\n", 1207 duprintf("get_entries: I've got %u not %u!\n",
1208 private->size, get.size); 1208 private->size, get.size);
1209 ret = -EINVAL; 1209 ret = -EAGAIN;
1210 } 1210 }
1211 module_put(t->me); 1211 module_put(t->me);
1212 xt_table_unlock(t); 1212 xt_table_unlock(t);
@@ -1225,7 +1225,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1225 struct xt_table *t; 1225 struct xt_table *t;
1226 struct xt_table_info *oldinfo; 1226 struct xt_table_info *oldinfo;
1227 struct xt_counters *counters; 1227 struct xt_counters *counters;
1228 void *loc_cpu_old_entry; 1228 const void *loc_cpu_old_entry;
1229 1229
1230 ret = 0; 1230 ret = 0;
1231 counters = vmalloc_node(num_counters * sizeof(struct xt_counters), 1231 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
@@ -1369,9 +1369,9 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
1369 int size; 1369 int size;
1370 void *ptmp; 1370 void *ptmp;
1371 struct xt_table *t; 1371 struct xt_table *t;
1372 struct xt_table_info *private; 1372 const struct xt_table_info *private;
1373 int ret = 0; 1373 int ret = 0;
1374 void *loc_cpu_entry; 1374 const void *loc_cpu_entry;
1375#ifdef CONFIG_COMPAT 1375#ifdef CONFIG_COMPAT
1376 struct compat_xt_counters_info compat_tmp; 1376 struct compat_xt_counters_info compat_tmp;
1377 1377
@@ -1879,11 +1879,11 @@ compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1879 1879
1880 switch (cmd) { 1880 switch (cmd) {
1881 case IP6T_SO_SET_REPLACE: 1881 case IP6T_SO_SET_REPLACE:
1882 ret = compat_do_replace(sk->sk_net, user, len); 1882 ret = compat_do_replace(sock_net(sk), user, len);
1883 break; 1883 break;
1884 1884
1885 case IP6T_SO_SET_ADD_COUNTERS: 1885 case IP6T_SO_SET_ADD_COUNTERS:
1886 ret = do_add_counters(sk->sk_net, user, len, 1); 1886 ret = do_add_counters(sock_net(sk), user, len, 1);
1887 break; 1887 break;
1888 1888
1889 default: 1889 default:
@@ -1905,11 +1905,11 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1905 void __user *userptr) 1905 void __user *userptr)
1906{ 1906{
1907 struct xt_counters *counters; 1907 struct xt_counters *counters;
1908 struct xt_table_info *private = table->private; 1908 const struct xt_table_info *private = table->private;
1909 void __user *pos; 1909 void __user *pos;
1910 unsigned int size; 1910 unsigned int size;
1911 int ret = 0; 1911 int ret = 0;
1912 void *loc_cpu_entry; 1912 const void *loc_cpu_entry;
1913 unsigned int i = 0; 1913 unsigned int i = 0;
1914 1914
1915 counters = alloc_counters(table); 1915 counters = alloc_counters(table);
@@ -1956,7 +1956,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1956 xt_compat_lock(AF_INET6); 1956 xt_compat_lock(AF_INET6);
1957 t = xt_find_table_lock(net, AF_INET6, get.name); 1957 t = xt_find_table_lock(net, AF_INET6, get.name);
1958 if (t && !IS_ERR(t)) { 1958 if (t && !IS_ERR(t)) {
1959 struct xt_table_info *private = t->private; 1959 const struct xt_table_info *private = t->private;
1960 struct xt_table_info info; 1960 struct xt_table_info info;
1961 duprintf("t->private->number = %u\n", private->number); 1961 duprintf("t->private->number = %u\n", private->number);
1962 ret = compat_table_info(private, &info); 1962 ret = compat_table_info(private, &info);
@@ -1966,7 +1966,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1966 } else if (!ret) { 1966 } else if (!ret) {
1967 duprintf("compat_get_entries: I've got %u not %u!\n", 1967 duprintf("compat_get_entries: I've got %u not %u!\n",
1968 private->size, get.size); 1968 private->size, get.size);
1969 ret = -EINVAL; 1969 ret = -EAGAIN;
1970 } 1970 }
1971 xt_compat_flush_offsets(AF_INET6); 1971 xt_compat_flush_offsets(AF_INET6);
1972 module_put(t->me); 1972 module_put(t->me);
@@ -1990,10 +1990,10 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1990 1990
1991 switch (cmd) { 1991 switch (cmd) {
1992 case IP6T_SO_GET_INFO: 1992 case IP6T_SO_GET_INFO:
1993 ret = get_info(sk->sk_net, user, len, 1); 1993 ret = get_info(sock_net(sk), user, len, 1);
1994 break; 1994 break;
1995 case IP6T_SO_GET_ENTRIES: 1995 case IP6T_SO_GET_ENTRIES:
1996 ret = compat_get_entries(sk->sk_net, user, len); 1996 ret = compat_get_entries(sock_net(sk), user, len);
1997 break; 1997 break;
1998 default: 1998 default:
1999 ret = do_ip6t_get_ctl(sk, cmd, user, len); 1999 ret = do_ip6t_get_ctl(sk, cmd, user, len);
@@ -2012,11 +2012,11 @@ do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2012 2012
2013 switch (cmd) { 2013 switch (cmd) {
2014 case IP6T_SO_SET_REPLACE: 2014 case IP6T_SO_SET_REPLACE:
2015 ret = do_replace(sk->sk_net, user, len); 2015 ret = do_replace(sock_net(sk), user, len);
2016 break; 2016 break;
2017 2017
2018 case IP6T_SO_SET_ADD_COUNTERS: 2018 case IP6T_SO_SET_ADD_COUNTERS:
2019 ret = do_add_counters(sk->sk_net, user, len, 0); 2019 ret = do_add_counters(sock_net(sk), user, len, 0);
2020 break; 2020 break;
2021 2021
2022 default: 2022 default:
@@ -2037,11 +2037,11 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2037 2037
2038 switch (cmd) { 2038 switch (cmd) {
2039 case IP6T_SO_GET_INFO: 2039 case IP6T_SO_GET_INFO:
2040 ret = get_info(sk->sk_net, user, len, 0); 2040 ret = get_info(sock_net(sk), user, len, 0);
2041 break; 2041 break;
2042 2042
2043 case IP6T_SO_GET_ENTRIES: 2043 case IP6T_SO_GET_ENTRIES:
2044 ret = get_entries(sk->sk_net, user, len); 2044 ret = get_entries(sock_net(sk), user, len);
2045 break; 2045 break;
2046 2046
2047 case IP6T_SO_GET_REVISION_MATCH: 2047 case IP6T_SO_GET_REVISION_MATCH:
@@ -2155,7 +2155,8 @@ icmp6_match(const struct sk_buff *skb,
2155 unsigned int protoff, 2155 unsigned int protoff,
2156 bool *hotdrop) 2156 bool *hotdrop)
2157{ 2157{
2158 struct icmp6hdr _icmph, *ic; 2158 const struct icmp6hdr *ic;
2159 struct icmp6hdr _icmph;
2159 const struct ip6t_icmp *icmpinfo = matchinfo; 2160 const struct ip6t_icmp *icmpinfo = matchinfo;
2160 2161
2161 /* Must not be a fragment. */ 2162 /* Must not be a fragment. */
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index 86a613810b69..3a2316974f83 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -363,11 +363,15 @@ static void dump_packet(const struct nf_loginfo *info,
363 if ((logflags & IP6T_LOG_UID) && recurse && skb->sk) { 363 if ((logflags & IP6T_LOG_UID) && recurse && skb->sk) {
364 read_lock_bh(&skb->sk->sk_callback_lock); 364 read_lock_bh(&skb->sk->sk_callback_lock);
365 if (skb->sk->sk_socket && skb->sk->sk_socket->file) 365 if (skb->sk->sk_socket && skb->sk->sk_socket->file)
366 printk("UID=%u GID=%u", 366 printk("UID=%u GID=%u ",
367 skb->sk->sk_socket->file->f_uid, 367 skb->sk->sk_socket->file->f_uid,
368 skb->sk->sk_socket->file->f_gid); 368 skb->sk->sk_socket->file->f_gid);
369 read_unlock_bh(&skb->sk->sk_callback_lock); 369 read_unlock_bh(&skb->sk->sk_callback_lock);
370 } 370 }
371
372 /* Max length: 16 "MARK=0xFFFFFFFF " */
373 if (!recurse && skb->mark)
374 printk("MARK=0x%x ", skb->mark);
371} 375}
372 376
373static struct nf_loginfo default_loginfo = { 377static struct nf_loginfo default_loginfo = {
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index b23baa635fe0..44c8d65a2431 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -41,7 +41,8 @@ static void send_reset(struct sk_buff *oldskb)
41 struct tcphdr otcph, *tcph; 41 struct tcphdr otcph, *tcph;
42 unsigned int otcplen, hh_len; 42 unsigned int otcplen, hh_len;
43 int tcphoff, needs_ack; 43 int tcphoff, needs_ack;
44 struct ipv6hdr *oip6h = ipv6_hdr(oldskb), *ip6h; 44 const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
45 struct ipv6hdr *ip6h;
45 struct dst_entry *dst = NULL; 46 struct dst_entry *dst = NULL;
46 u8 proto; 47 u8 proto;
47 struct flowi fl; 48 struct flowi fl;
@@ -93,7 +94,7 @@ static void send_reset(struct sk_buff *oldskb)
93 fl.fl_ip_sport = otcph.dest; 94 fl.fl_ip_sport = otcph.dest;
94 fl.fl_ip_dport = otcph.source; 95 fl.fl_ip_dport = otcph.source;
95 security_skb_classify_flow(oldskb, &fl); 96 security_skb_classify_flow(oldskb, &fl);
96 dst = ip6_route_output(NULL, &fl); 97 dst = ip6_route_output(&init_net, NULL, &fl);
97 if (dst == NULL) 98 if (dst == NULL)
98 return; 99 return;
99 if (dst->error || xfrm_lookup(&dst, &fl, NULL, 0)) 100 if (dst->error || xfrm_lookup(&dst, &fl, NULL, 0))
@@ -177,7 +178,7 @@ reject_tg6(struct sk_buff *skb, const struct net_device *in,
177{ 178{
178 const struct ip6t_reject_info *reject = targinfo; 179 const struct ip6t_reject_info *reject = targinfo;
179 180
180 pr_debug("%s: medium point\n", __FUNCTION__); 181 pr_debug("%s: medium point\n", __func__);
181 /* WARNING: This code causes reentry within ip6tables. 182 /* WARNING: This code causes reentry within ip6tables.
182 This means that the ip6tables jump stack is now crap. We 183 This means that the ip6tables jump stack is now crap. We
183 must return an absolute verdict. --RR */ 184 must return an absolute verdict. --RR */
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c
index 3a940171f829..317a8960a757 100644
--- a/net/ipv6/netfilter/ip6t_ipv6header.c
+++ b/net/ipv6/netfilter/ip6t_ipv6header.c
@@ -49,7 +49,8 @@ ipv6header_mt6(const struct sk_buff *skb, const struct net_device *in,
49 temp = 0; 49 temp = 0;
50 50
51 while (ip6t_ext_hdr(nexthdr)) { 51 while (ip6t_ext_hdr(nexthdr)) {
52 struct ipv6_opt_hdr _hdr, *hp; 52 const struct ipv6_opt_hdr *hp;
53 struct ipv6_opt_hdr _hdr;
53 int hdrlen; 54 int hdrlen;
54 55
55 /* Is there enough space for the next ext header? */ 56 /* Is there enough space for the next ext header? */
diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c
index 12a9efe9886e..81aaf7aaaabf 100644
--- a/net/ipv6/netfilter/ip6t_rt.c
+++ b/net/ipv6/netfilter/ip6t_rt.c
@@ -110,7 +110,8 @@ rt_mt6(const struct sk_buff *skb, const struct net_device *in,
110 !!(rtinfo->invflags & IP6T_RT_INV_TYP))); 110 !!(rtinfo->invflags & IP6T_RT_INV_TYP)));
111 111
112 if (ret && (rtinfo->flags & IP6T_RT_RES)) { 112 if (ret && (rtinfo->flags & IP6T_RT_RES)) {
113 u_int32_t *rp, _reserved; 113 const u_int32_t *rp;
114 u_int32_t _reserved;
114 rp = skb_header_pointer(skb, 115 rp = skb_header_pointer(skb,
115 ptr + offsetof(struct rt0_hdr, 116 ptr + offsetof(struct rt0_hdr,
116 reserved), 117 reserved),
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index 2d9cd095a72c..f979e48b469b 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -54,7 +54,7 @@ static struct
54static struct xt_table packet_filter = { 54static struct xt_table packet_filter = {
55 .name = "filter", 55 .name = "filter",
56 .valid_hooks = FILTER_VALID_HOOKS, 56 .valid_hooks = FILTER_VALID_HOOKS,
57 .lock = RW_LOCK_UNLOCKED, 57 .lock = __RW_LOCK_UNLOCKED(packet_filter.lock),
58 .me = THIS_MODULE, 58 .me = THIS_MODULE,
59 .af = AF_INET6, 59 .af = AF_INET6,
60}; 60};
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 035343a90ffe..27a5e8b48d93 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -60,7 +60,7 @@ static struct
60static struct xt_table packet_mangler = { 60static struct xt_table packet_mangler = {
61 .name = "mangle", 61 .name = "mangle",
62 .valid_hooks = MANGLE_VALID_HOOKS, 62 .valid_hooks = MANGLE_VALID_HOOKS,
63 .lock = RW_LOCK_UNLOCKED, 63 .lock = __RW_LOCK_UNLOCKED(packet_mangler.lock),
64 .me = THIS_MODULE, 64 .me = THIS_MODULE,
65 .af = AF_INET6, 65 .af = AF_INET6,
66}; 66};
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index 5cd84203abfe..92b91077ac29 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -38,7 +38,7 @@ static struct
38static struct xt_table packet_raw = { 38static struct xt_table packet_raw = {
39 .name = "raw", 39 .name = "raw",
40 .valid_hooks = RAW_VALID_HOOKS, 40 .valid_hooks = RAW_VALID_HOOKS,
41 .lock = RW_LOCK_UNLOCKED, 41 .lock = __RW_LOCK_UNLOCKED(packet_raw.lock),
42 .me = THIS_MODULE, 42 .me = THIS_MODULE,
43 .af = AF_INET6, 43 .af = AF_INET6,
44}; 44};
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 3717bdf34f6e..85050c072abd 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -27,8 +27,8 @@
27#include <net/netfilter/nf_conntrack_l3proto.h> 27#include <net/netfilter/nf_conntrack_l3proto.h>
28#include <net/netfilter/nf_conntrack_core.h> 28#include <net/netfilter/nf_conntrack_core.h>
29 29
30static int ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, 30static bool ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
31 struct nf_conntrack_tuple *tuple) 31 struct nf_conntrack_tuple *tuple)
32{ 32{
33 const u_int32_t *ap; 33 const u_int32_t *ap;
34 u_int32_t _addrs[8]; 34 u_int32_t _addrs[8];
@@ -36,21 +36,21 @@ static int ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
36 ap = skb_header_pointer(skb, nhoff + offsetof(struct ipv6hdr, saddr), 36 ap = skb_header_pointer(skb, nhoff + offsetof(struct ipv6hdr, saddr),
37 sizeof(_addrs), _addrs); 37 sizeof(_addrs), _addrs);
38 if (ap == NULL) 38 if (ap == NULL)
39 return 0; 39 return false;
40 40
41 memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6)); 41 memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
42 memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6)); 42 memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
43 43
44 return 1; 44 return true;
45} 45}
46 46
47static int ipv6_invert_tuple(struct nf_conntrack_tuple *tuple, 47static bool ipv6_invert_tuple(struct nf_conntrack_tuple *tuple,
48 const struct nf_conntrack_tuple *orig) 48 const struct nf_conntrack_tuple *orig)
49{ 49{
50 memcpy(tuple->src.u3.ip6, orig->dst.u3.ip6, sizeof(tuple->src.u3.ip6)); 50 memcpy(tuple->src.u3.ip6, orig->dst.u3.ip6, sizeof(tuple->src.u3.ip6));
51 memcpy(tuple->dst.u3.ip6, orig->src.u3.ip6, sizeof(tuple->dst.u3.ip6)); 51 memcpy(tuple->dst.u3.ip6, orig->src.u3.ip6, sizeof(tuple->dst.u3.ip6));
52 52
53 return 1; 53 return true;
54} 54}
55 55
56static int ipv6_print_tuple(struct seq_file *s, 56static int ipv6_print_tuple(struct seq_file *s,
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 0897d0f4c4a2..ee713b03e9ec 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -28,21 +28,21 @@
28 28
29static unsigned long nf_ct_icmpv6_timeout __read_mostly = 30*HZ; 29static unsigned long nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
30 30
31static int icmpv6_pkt_to_tuple(const struct sk_buff *skb, 31static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
32 unsigned int dataoff, 32 unsigned int dataoff,
33 struct nf_conntrack_tuple *tuple) 33 struct nf_conntrack_tuple *tuple)
34{ 34{
35 const struct icmp6hdr *hp; 35 const struct icmp6hdr *hp;
36 struct icmp6hdr _hdr; 36 struct icmp6hdr _hdr;
37 37
38 hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); 38 hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
39 if (hp == NULL) 39 if (hp == NULL)
40 return 0; 40 return false;
41 tuple->dst.u.icmp.type = hp->icmp6_type; 41 tuple->dst.u.icmp.type = hp->icmp6_type;
42 tuple->src.u.icmp.id = hp->icmp6_identifier; 42 tuple->src.u.icmp.id = hp->icmp6_identifier;
43 tuple->dst.u.icmp.code = hp->icmp6_code; 43 tuple->dst.u.icmp.code = hp->icmp6_code;
44 44
45 return 1; 45 return true;
46} 46}
47 47
48/* Add 1; spaces filled with 0. */ 48/* Add 1; spaces filled with 0. */
@@ -53,17 +53,17 @@ static const u_int8_t invmap[] = {
53 [ICMPV6_NI_REPLY - 128] = ICMPV6_NI_REPLY +1 53 [ICMPV6_NI_REPLY - 128] = ICMPV6_NI_REPLY +1
54}; 54};
55 55
56static int icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple, 56static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
57 const struct nf_conntrack_tuple *orig) 57 const struct nf_conntrack_tuple *orig)
58{ 58{
59 int type = orig->dst.u.icmp.type - 128; 59 int type = orig->dst.u.icmp.type - 128;
60 if (type < 0 || type >= sizeof(invmap) || !invmap[type]) 60 if (type < 0 || type >= sizeof(invmap) || !invmap[type])
61 return 0; 61 return false;
62 62
63 tuple->src.u.icmp.id = orig->src.u.icmp.id; 63 tuple->src.u.icmp.id = orig->src.u.icmp.id;
64 tuple->dst.u.icmp.type = invmap[type] - 1; 64 tuple->dst.u.icmp.type = invmap[type] - 1;
65 tuple->dst.u.icmp.code = orig->dst.u.icmp.code; 65 tuple->dst.u.icmp.code = orig->dst.u.icmp.code;
66 return 1; 66 return true;
67} 67}
68 68
69/* Print out the per-protocol part of the tuple. */ 69/* Print out the per-protocol part of the tuple. */
@@ -102,9 +102,8 @@ static int icmpv6_packet(struct nf_conn *ct,
102} 102}
103 103
104/* Called when a new connection for this protocol found. */ 104/* Called when a new connection for this protocol found. */
105static int icmpv6_new(struct nf_conn *ct, 105static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb,
106 const struct sk_buff *skb, 106 unsigned int dataoff)
107 unsigned int dataoff)
108{ 107{
109 static const u_int8_t valid_new[] = { 108 static const u_int8_t valid_new[] = {
110 [ICMPV6_ECHO_REQUEST - 128] = 1, 109 [ICMPV6_ECHO_REQUEST - 128] = 1,
@@ -116,11 +115,11 @@ static int icmpv6_new(struct nf_conn *ct,
116 /* Can't create a new ICMPv6 `conn' with this. */ 115 /* Can't create a new ICMPv6 `conn' with this. */
117 pr_debug("icmpv6: can't create new conn with type %u\n", 116 pr_debug("icmpv6: can't create new conn with type %u\n",
118 type + 128); 117 type + 128);
119 NF_CT_DUMP_TUPLE(&ct->tuplehash[0].tuple); 118 nf_ct_dump_tuple_ipv6(&ct->tuplehash[0].tuple);
120 return 0; 119 return false;
121 } 120 }
122 atomic_set(&ct->proto.icmp.count, 0); 121 atomic_set(&ct->proto.icmp.count, 0);
123 return 1; 122 return true;
124} 123}
125 124
126static int 125static int
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 24c0d03095bf..2dccad48058c 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -103,8 +103,8 @@ struct ctl_table nf_ct_ipv6_sysctl_table[] = {
103}; 103};
104#endif 104#endif
105 105
106static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr, 106static unsigned int ip6qhashfn(__be32 id, const struct in6_addr *saddr,
107 struct in6_addr *daddr) 107 const struct in6_addr *daddr)
108{ 108{
109 u32 a, b, c; 109 u32 a, b, c;
110 110
@@ -132,7 +132,7 @@ static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
132 132
133static unsigned int nf_hashfn(struct inet_frag_queue *q) 133static unsigned int nf_hashfn(struct inet_frag_queue *q)
134{ 134{
135 struct nf_ct_frag6_queue *nq; 135 const struct nf_ct_frag6_queue *nq;
136 136
137 nq = container_of(q, struct nf_ct_frag6_queue, q); 137 nq = container_of(q, struct nf_ct_frag6_queue, q);
138 return ip6qhashfn(nq->id, &nq->saddr, &nq->daddr); 138 return ip6qhashfn(nq->id, &nq->saddr, &nq->daddr);
@@ -185,7 +185,7 @@ static void nf_ct_frag6_expire(unsigned long data)
185 185
186 spin_lock(&fq->q.lock); 186 spin_lock(&fq->q.lock);
187 187
188 if (fq->q.last_in & COMPLETE) 188 if (fq->q.last_in & INET_FRAG_COMPLETE)
189 goto out; 189 goto out;
190 190
191 fq_kill(fq); 191 fq_kill(fq);
@@ -222,12 +222,12 @@ oom:
222 222
223 223
224static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, 224static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
225 struct frag_hdr *fhdr, int nhoff) 225 const struct frag_hdr *fhdr, int nhoff)
226{ 226{
227 struct sk_buff *prev, *next; 227 struct sk_buff *prev, *next;
228 int offset, end; 228 int offset, end;
229 229
230 if (fq->q.last_in & COMPLETE) { 230 if (fq->q.last_in & INET_FRAG_COMPLETE) {
231 pr_debug("Allready completed\n"); 231 pr_debug("Allready completed\n");
232 goto err; 232 goto err;
233 } 233 }
@@ -254,11 +254,11 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
254 * or have different end, the segment is corrupted. 254 * or have different end, the segment is corrupted.
255 */ 255 */
256 if (end < fq->q.len || 256 if (end < fq->q.len ||
257 ((fq->q.last_in & LAST_IN) && end != fq->q.len)) { 257 ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len)) {
258 pr_debug("already received last fragment\n"); 258 pr_debug("already received last fragment\n");
259 goto err; 259 goto err;
260 } 260 }
261 fq->q.last_in |= LAST_IN; 261 fq->q.last_in |= INET_FRAG_LAST_IN;
262 fq->q.len = end; 262 fq->q.len = end;
263 } else { 263 } else {
264 /* Check if the fragment is rounded to 8 bytes. 264 /* Check if the fragment is rounded to 8 bytes.
@@ -273,7 +273,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
273 } 273 }
274 if (end > fq->q.len) { 274 if (end > fq->q.len) {
275 /* Some bits beyond end -> corruption. */ 275 /* Some bits beyond end -> corruption. */
276 if (fq->q.last_in & LAST_IN) { 276 if (fq->q.last_in & INET_FRAG_LAST_IN) {
277 pr_debug("last packet already reached.\n"); 277 pr_debug("last packet already reached.\n");
278 goto err; 278 goto err;
279 } 279 }
@@ -385,7 +385,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
385 */ 385 */
386 if (offset == 0) { 386 if (offset == 0) {
387 fq->nhoffset = nhoff; 387 fq->nhoffset = nhoff;
388 fq->q.last_in |= FIRST_IN; 388 fq->q.last_in |= INET_FRAG_FIRST_IN;
389 } 389 }
390 write_lock(&nf_frags.lock); 390 write_lock(&nf_frags.lock);
391 list_move_tail(&fq->q.lru_list, &nf_init_frags.lru_list); 391 list_move_tail(&fq->q.lru_list, &nf_init_frags.lru_list);
@@ -647,7 +647,8 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
647 goto ret_orig; 647 goto ret_orig;
648 } 648 }
649 649
650 if (fq->q.last_in == (FIRST_IN|LAST_IN) && fq->q.meat == fq->q.len) { 650 if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
651 fq->q.meat == fq->q.len) {
651 ret_skb = nf_ct_frag6_reasm(fq, dev); 652 ret_skb = nf_ct_frag6_reasm(fq, dev);
652 if (ret_skb == NULL) 653 if (ret_skb == NULL)
653 pr_debug("Can't reassemble fragmented packets\n"); 654 pr_debug("Can't reassemble fragmented packets\n");
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 199ef379e501..ca8b82f96fe5 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -35,16 +35,18 @@ static struct proc_dir_entry *proc_net_devsnmp6;
35 35
36static int sockstat6_seq_show(struct seq_file *seq, void *v) 36static int sockstat6_seq_show(struct seq_file *seq, void *v)
37{ 37{
38 struct net *net = seq->private;
39
38 seq_printf(seq, "TCP6: inuse %d\n", 40 seq_printf(seq, "TCP6: inuse %d\n",
39 sock_prot_inuse_get(&tcpv6_prot)); 41 sock_prot_inuse_get(net, &tcpv6_prot));
40 seq_printf(seq, "UDP6: inuse %d\n", 42 seq_printf(seq, "UDP6: inuse %d\n",
41 sock_prot_inuse_get(&udpv6_prot)); 43 sock_prot_inuse_get(net, &udpv6_prot));
42 seq_printf(seq, "UDPLITE6: inuse %d\n", 44 seq_printf(seq, "UDPLITE6: inuse %d\n",
43 sock_prot_inuse_get(&udplitev6_prot)); 45 sock_prot_inuse_get(net, &udplitev6_prot));
44 seq_printf(seq, "RAW6: inuse %d\n", 46 seq_printf(seq, "RAW6: inuse %d\n",
45 sock_prot_inuse_get(&rawv6_prot)); 47 sock_prot_inuse_get(net, &rawv6_prot));
46 seq_printf(seq, "FRAG6: inuse %d memory %d\n", 48 seq_printf(seq, "FRAG6: inuse %d memory %d\n",
47 ip6_frag_nqueues(&init_net), ip6_frag_mem(&init_net)); 49 ip6_frag_nqueues(net), ip6_frag_mem(net));
48 return 0; 50 return 0;
49} 51}
50 52
@@ -183,7 +185,32 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
183 185
184static int sockstat6_seq_open(struct inode *inode, struct file *file) 186static int sockstat6_seq_open(struct inode *inode, struct file *file)
185{ 187{
186 return single_open(file, sockstat6_seq_show, NULL); 188 int err;
189 struct net *net;
190
191 err = -ENXIO;
192 net = get_proc_net(inode);
193 if (net == NULL)
194 goto err_net;
195
196 err = single_open(file, sockstat6_seq_show, net);
197 if (err < 0)
198 goto err_open;
199
200 return 0;
201
202err_open:
203 put_net(net);
204err_net:
205 return err;
206}
207
208static int sockstat6_seq_release(struct inode *inode, struct file *file)
209{
210 struct net *net = ((struct seq_file *)file->private_data)->private;
211
212 put_net(net);
213 return single_release(inode, file);
187} 214}
188 215
189static const struct file_operations sockstat6_seq_fops = { 216static const struct file_operations sockstat6_seq_fops = {
@@ -191,7 +218,7 @@ static const struct file_operations sockstat6_seq_fops = {
191 .open = sockstat6_seq_open, 218 .open = sockstat6_seq_open,
192 .read = seq_read, 219 .read = seq_read,
193 .llseek = seq_lseek, 220 .llseek = seq_lseek,
194 .release = single_release, 221 .release = sockstat6_seq_release,
195}; 222};
196 223
197static int snmp6_seq_open(struct inode *inode, struct file *file) 224static int snmp6_seq_open(struct inode *inode, struct file *file)
@@ -214,6 +241,9 @@ int snmp6_register_dev(struct inet6_dev *idev)
214 if (!idev || !idev->dev) 241 if (!idev || !idev->dev)
215 return -EINVAL; 242 return -EINVAL;
216 243
244 if (dev_net(idev->dev) != &init_net)
245 return 0;
246
217 if (!proc_net_devsnmp6) 247 if (!proc_net_devsnmp6)
218 return -ENOENT; 248 return -ENOENT;
219 249
@@ -240,27 +270,45 @@ int snmp6_unregister_dev(struct inet6_dev *idev)
240 return 0; 270 return 0;
241} 271}
242 272
273static int ipv6_proc_init_net(struct net *net)
274{
275 if (!proc_net_fops_create(net, "sockstat6", S_IRUGO,
276 &sockstat6_seq_fops))
277 return -ENOMEM;
278 return 0;
279}
280
281static void ipv6_proc_exit_net(struct net *net)
282{
283 proc_net_remove(net, "sockstat6");
284}
285
286static struct pernet_operations ipv6_proc_ops = {
287 .init = ipv6_proc_init_net,
288 .exit = ipv6_proc_exit_net,
289};
290
243int __init ipv6_misc_proc_init(void) 291int __init ipv6_misc_proc_init(void)
244{ 292{
245 int rc = 0; 293 int rc = 0;
246 294
295 if (register_pernet_subsys(&ipv6_proc_ops))
296 goto proc_net_fail;
297
247 if (!proc_net_fops_create(&init_net, "snmp6", S_IRUGO, &snmp6_seq_fops)) 298 if (!proc_net_fops_create(&init_net, "snmp6", S_IRUGO, &snmp6_seq_fops))
248 goto proc_snmp6_fail; 299 goto proc_snmp6_fail;
249 300
250 proc_net_devsnmp6 = proc_mkdir("dev_snmp6", init_net.proc_net); 301 proc_net_devsnmp6 = proc_mkdir("dev_snmp6", init_net.proc_net);
251 if (!proc_net_devsnmp6) 302 if (!proc_net_devsnmp6)
252 goto proc_dev_snmp6_fail; 303 goto proc_dev_snmp6_fail;
253
254 if (!proc_net_fops_create(&init_net, "sockstat6", S_IRUGO, &sockstat6_seq_fops))
255 goto proc_sockstat6_fail;
256out: 304out:
257 return rc; 305 return rc;
258 306
259proc_sockstat6_fail:
260 proc_net_remove(&init_net, "dev_snmp6");
261proc_dev_snmp6_fail: 307proc_dev_snmp6_fail:
262 proc_net_remove(&init_net, "snmp6"); 308 proc_net_remove(&init_net, "snmp6");
263proc_snmp6_fail: 309proc_snmp6_fail:
310 unregister_pernet_subsys(&ipv6_proc_ops);
311proc_net_fail:
264 rc = -ENOMEM; 312 rc = -ENOMEM;
265 goto out; 313 goto out;
266} 314}
@@ -270,5 +318,6 @@ void ipv6_misc_proc_exit(void)
270 proc_net_remove(&init_net, "sockstat6"); 318 proc_net_remove(&init_net, "sockstat6");
271 proc_net_remove(&init_net, "dev_snmp6"); 319 proc_net_remove(&init_net, "dev_snmp6");
272 proc_net_remove(&init_net, "snmp6"); 320 proc_net_remove(&init_net, "snmp6");
321 unregister_pernet_subsys(&ipv6_proc_ops);
273} 322}
274 323
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 0a6fbc1d1a50..6193b124cbc7 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -53,6 +53,7 @@
53#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 53#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
54#include <net/mip6.h> 54#include <net/mip6.h>
55#endif 55#endif
56#include <linux/mroute6.h>
56 57
57#include <net/raw.h> 58#include <net/raw.h>
58#include <net/rawv6.h> 59#include <net/rawv6.h>
@@ -62,20 +63,9 @@
62#include <linux/seq_file.h> 63#include <linux/seq_file.h>
63 64
64static struct raw_hashinfo raw_v6_hashinfo = { 65static struct raw_hashinfo raw_v6_hashinfo = {
65 .lock = __RW_LOCK_UNLOCKED(), 66 .lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock),
66}; 67};
67 68
68static void raw_v6_hash(struct sock *sk)
69{
70 raw_hash_sk(sk, &raw_v6_hashinfo);
71}
72
73static void raw_v6_unhash(struct sock *sk)
74{
75 raw_unhash_sk(sk, &raw_v6_hashinfo);
76}
77
78
79static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, 69static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
80 unsigned short num, struct in6_addr *loc_addr, 70 unsigned short num, struct in6_addr *loc_addr,
81 struct in6_addr *rmt_addr, int dif) 71 struct in6_addr *rmt_addr, int dif)
@@ -87,7 +77,7 @@ static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
87 if (inet_sk(sk)->num == num) { 77 if (inet_sk(sk)->num == num) {
88 struct ipv6_pinfo *np = inet6_sk(sk); 78 struct ipv6_pinfo *np = inet6_sk(sk);
89 79
90 if (sk->sk_net != net) 80 if (!net_eq(sock_net(sk), net))
91 continue; 81 continue;
92 82
93 if (!ipv6_addr_any(&np->daddr) && 83 if (!ipv6_addr_any(&np->daddr) &&
@@ -179,15 +169,10 @@ static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
179 read_lock(&raw_v6_hashinfo.lock); 169 read_lock(&raw_v6_hashinfo.lock);
180 sk = sk_head(&raw_v6_hashinfo.ht[hash]); 170 sk = sk_head(&raw_v6_hashinfo.ht[hash]);
181 171
182 /*
183 * The first socket found will be delivered after
184 * delivery to transport protocols.
185 */
186
187 if (sk == NULL) 172 if (sk == NULL)
188 goto out; 173 goto out;
189 174
190 net = skb->dev->nd_net; 175 net = dev_net(skb->dev);
191 sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr, IP6CB(skb)->iif); 176 sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr, IP6CB(skb)->iif);
192 177
193 while (sk) { 178 while (sk) {
@@ -291,7 +276,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
291 if (!sk->sk_bound_dev_if) 276 if (!sk->sk_bound_dev_if)
292 goto out; 277 goto out;
293 278
294 dev = dev_get_by_index(sk->sk_net, sk->sk_bound_dev_if); 279 dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if);
295 if (!dev) { 280 if (!dev) {
296 err = -ENODEV; 281 err = -ENODEV;
297 goto out; 282 goto out;
@@ -304,7 +289,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
304 v4addr = LOOPBACK4_IPV6; 289 v4addr = LOOPBACK4_IPV6;
305 if (!(addr_type & IPV6_ADDR_MULTICAST)) { 290 if (!(addr_type & IPV6_ADDR_MULTICAST)) {
306 err = -EADDRNOTAVAIL; 291 err = -EADDRNOTAVAIL;
307 if (!ipv6_chk_addr(sk->sk_net, &addr->sin6_addr, 292 if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
308 dev, 0)) { 293 dev, 0)) {
309 if (dev) 294 if (dev)
310 dev_put(dev); 295 dev_put(dev);
@@ -372,11 +357,11 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
372 read_lock(&raw_v6_hashinfo.lock); 357 read_lock(&raw_v6_hashinfo.lock);
373 sk = sk_head(&raw_v6_hashinfo.ht[hash]); 358 sk = sk_head(&raw_v6_hashinfo.ht[hash]);
374 if (sk != NULL) { 359 if (sk != NULL) {
375 struct ipv6hdr *hdr = (struct ipv6hdr *) skb->data; 360 /* Note: ipv6_hdr(skb) != skb->data */
376 361 struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data;
377 saddr = &hdr->saddr; 362 saddr = &ip6h->saddr;
378 daddr = &hdr->daddr; 363 daddr = &ip6h->daddr;
379 net = skb->dev->nd_net; 364 net = dev_net(skb->dev);
380 365
381 while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr, 366 while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr,
382 IP6CB(skb)->iif))) { 367 IP6CB(skb)->iif))) {
@@ -822,15 +807,6 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
822 fl.fl6_flowlabel = np->flow_label; 807 fl.fl6_flowlabel = np->flow_label;
823 } 808 }
824 809
825 if (ipv6_addr_any(daddr)) {
826 /*
827 * unspecified destination address
828 * treated as error... is this correct ?
829 */
830 fl6_sock_release(flowlabel);
831 return(-EINVAL);
832 }
833
834 if (fl.oif == 0) 810 if (fl.oif == 0)
835 fl.oif = sk->sk_bound_dev_if; 811 fl.oif = sk->sk_bound_dev_if;
836 812
@@ -863,7 +839,10 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
863 if (err) 839 if (err)
864 goto out; 840 goto out;
865 841
866 ipv6_addr_copy(&fl.fl6_dst, daddr); 842 if (!ipv6_addr_any(daddr))
843 ipv6_addr_copy(&fl.fl6_dst, daddr);
844 else
845 fl.fl6_dst.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
867 if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr)) 846 if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
868 ipv6_addr_copy(&fl.fl6_src, &np->saddr); 847 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
869 848
@@ -898,9 +877,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
898 else 877 else
899 hlimit = np->hop_limit; 878 hlimit = np->hop_limit;
900 if (hlimit < 0) 879 if (hlimit < 0)
901 hlimit = dst_metric(dst, RTAX_HOPLIMIT); 880 hlimit = ip6_dst_hoplimit(dst);
902 if (hlimit < 0)
903 hlimit = ipv6_get_hoplimit(dst->dev);
904 } 881 }
905 882
906 if (tclass < 0) { 883 if (tclass < 0) {
@@ -1155,7 +1132,11 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
1155 } 1132 }
1156 1133
1157 default: 1134 default:
1135#ifdef CONFIG_IPV6_MROUTE
1136 return ip6mr_ioctl(sk, cmd, (void __user *)arg);
1137#else
1158 return -ENOIOCTLCMD; 1138 return -ENOIOCTLCMD;
1139#endif
1159 } 1140 }
1160} 1141}
1161 1142
@@ -1163,7 +1144,7 @@ static void rawv6_close(struct sock *sk, long timeout)
1163{ 1144{
1164 if (inet_sk(sk)->num == IPPROTO_RAW) 1145 if (inet_sk(sk)->num == IPPROTO_RAW)
1165 ip6_ra_control(sk, -1, NULL); 1146 ip6_ra_control(sk, -1, NULL);
1166 1147 ip6mr_sk_done(sk);
1167 sk_common_release(sk); 1148 sk_common_release(sk);
1168} 1149}
1169 1150
@@ -1186,8 +1167,6 @@ static int rawv6_init_sk(struct sock *sk)
1186 return(0); 1167 return(0);
1187} 1168}
1188 1169
1189DEFINE_PROTO_INUSE(rawv6)
1190
1191struct proto rawv6_prot = { 1170struct proto rawv6_prot = {
1192 .name = "RAWv6", 1171 .name = "RAWv6",
1193 .owner = THIS_MODULE, 1172 .owner = THIS_MODULE,
@@ -1203,14 +1182,14 @@ struct proto rawv6_prot = {
1203 .recvmsg = rawv6_recvmsg, 1182 .recvmsg = rawv6_recvmsg,
1204 .bind = rawv6_bind, 1183 .bind = rawv6_bind,
1205 .backlog_rcv = rawv6_rcv_skb, 1184 .backlog_rcv = rawv6_rcv_skb,
1206 .hash = raw_v6_hash, 1185 .hash = raw_hash_sk,
1207 .unhash = raw_v6_unhash, 1186 .unhash = raw_unhash_sk,
1208 .obj_size = sizeof(struct raw6_sock), 1187 .obj_size = sizeof(struct raw6_sock),
1188 .h.raw_hash = &raw_v6_hashinfo,
1209#ifdef CONFIG_COMPAT 1189#ifdef CONFIG_COMPAT
1210 .compat_setsockopt = compat_rawv6_setsockopt, 1190 .compat_setsockopt = compat_rawv6_setsockopt,
1211 .compat_getsockopt = compat_rawv6_getsockopt, 1191 .compat_getsockopt = compat_rawv6_getsockopt,
1212#endif 1192#endif
1213 REF_PROTO_INUSE(rawv6)
1214}; 1193};
1215 1194
1216#ifdef CONFIG_PROC_FS 1195#ifdef CONFIG_PROC_FS
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index f936d045a39d..7b247e3a16fe 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -202,7 +202,7 @@ static void ip6_frag_expire(unsigned long data)
202 202
203 spin_lock(&fq->q.lock); 203 spin_lock(&fq->q.lock);
204 204
205 if (fq->q.last_in & COMPLETE) 205 if (fq->q.last_in & INET_FRAG_COMPLETE)
206 goto out; 206 goto out;
207 207
208 fq_kill(fq); 208 fq_kill(fq);
@@ -217,7 +217,7 @@ static void ip6_frag_expire(unsigned long data)
217 rcu_read_unlock(); 217 rcu_read_unlock();
218 218
219 /* Don't send error if the first segment did not arrive. */ 219 /* Don't send error if the first segment did not arrive. */
220 if (!(fq->q.last_in&FIRST_IN) || !fq->q.fragments) 220 if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
221 goto out; 221 goto out;
222 222
223 /* 223 /*
@@ -265,7 +265,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
265 struct net_device *dev; 265 struct net_device *dev;
266 int offset, end; 266 int offset, end;
267 267
268 if (fq->q.last_in & COMPLETE) 268 if (fq->q.last_in & INET_FRAG_COMPLETE)
269 goto err; 269 goto err;
270 270
271 offset = ntohs(fhdr->frag_off) & ~0x7; 271 offset = ntohs(fhdr->frag_off) & ~0x7;
@@ -294,9 +294,9 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
294 * or have different end, the segment is corrupted. 294 * or have different end, the segment is corrupted.
295 */ 295 */
296 if (end < fq->q.len || 296 if (end < fq->q.len ||
297 ((fq->q.last_in & LAST_IN) && end != fq->q.len)) 297 ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
298 goto err; 298 goto err;
299 fq->q.last_in |= LAST_IN; 299 fq->q.last_in |= INET_FRAG_LAST_IN;
300 fq->q.len = end; 300 fq->q.len = end;
301 } else { 301 } else {
302 /* Check if the fragment is rounded to 8 bytes. 302 /* Check if the fragment is rounded to 8 bytes.
@@ -314,7 +314,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
314 } 314 }
315 if (end > fq->q.len) { 315 if (end > fq->q.len) {
316 /* Some bits beyond end -> corruption. */ 316 /* Some bits beyond end -> corruption. */
317 if (fq->q.last_in & LAST_IN) 317 if (fq->q.last_in & INET_FRAG_LAST_IN)
318 goto err; 318 goto err;
319 fq->q.len = end; 319 fq->q.len = end;
320 } 320 }
@@ -417,10 +417,11 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
417 */ 417 */
418 if (offset == 0) { 418 if (offset == 0) {
419 fq->nhoffset = nhoff; 419 fq->nhoffset = nhoff;
420 fq->q.last_in |= FIRST_IN; 420 fq->q.last_in |= INET_FRAG_FIRST_IN;
421 } 421 }
422 422
423 if (fq->q.last_in == (FIRST_IN | LAST_IN) && fq->q.meat == fq->q.len) 423 if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
424 fq->q.meat == fq->q.len)
424 return ip6_frag_reasm(fq, prev, dev); 425 return ip6_frag_reasm(fq, prev, dev);
425 426
426 write_lock(&ip6_frags.lock); 427 write_lock(&ip6_frags.lock);
@@ -600,7 +601,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
600 return 1; 601 return 1;
601 } 602 }
602 603
603 net = skb->dev->nd_net; 604 net = dev_net(skb->dev);
604 if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh) 605 if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
605 ip6_evictor(net, ip6_dst_idev(skb->dst)); 606 ip6_evictor(net, ip6_dst_idev(skb->dst));
606 607
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e8b241cb60bc..210a079cfc6f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -36,10 +36,12 @@
36#include <linux/route.h> 36#include <linux/route.h>
37#include <linux/netdevice.h> 37#include <linux/netdevice.h>
38#include <linux/in6.h> 38#include <linux/in6.h>
39#include <linux/mroute6.h>
39#include <linux/init.h> 40#include <linux/init.h>
40#include <linux/if_arp.h> 41#include <linux/if_arp.h>
41#include <linux/proc_fs.h> 42#include <linux/proc_fs.h>
42#include <linux/seq_file.h> 43#include <linux/seq_file.h>
44#include <linux/nsproxy.h>
43#include <net/net_namespace.h> 45#include <net/net_namespace.h>
44#include <net/snmp.h> 46#include <net/snmp.h>
45#include <net/ipv6.h> 47#include <net/ipv6.h>
@@ -87,14 +89,16 @@ static void ip6_link_failure(struct sk_buff *skb);
87static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu); 89static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
88 90
89#ifdef CONFIG_IPV6_ROUTE_INFO 91#ifdef CONFIG_IPV6_ROUTE_INFO
90static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixlen, 92static struct rt6_info *rt6_add_route_info(struct net *net,
93 struct in6_addr *prefix, int prefixlen,
91 struct in6_addr *gwaddr, int ifindex, 94 struct in6_addr *gwaddr, int ifindex,
92 unsigned pref); 95 unsigned pref);
93static struct rt6_info *rt6_get_route_info(struct in6_addr *prefix, int prefixlen, 96static struct rt6_info *rt6_get_route_info(struct net *net,
97 struct in6_addr *prefix, int prefixlen,
94 struct in6_addr *gwaddr, int ifindex); 98 struct in6_addr *gwaddr, int ifindex);
95#endif 99#endif
96 100
97static struct dst_ops ip6_dst_ops = { 101static struct dst_ops ip6_dst_ops_template = {
98 .family = AF_INET6, 102 .family = AF_INET6,
99 .protocol = __constant_htons(ETH_P_IPV6), 103 .protocol = __constant_htons(ETH_P_IPV6),
100 .gc = ip6_dst_gc, 104 .gc = ip6_dst_gc,
@@ -124,7 +128,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
124 .entries = ATOMIC_INIT(0), 128 .entries = ATOMIC_INIT(0),
125}; 129};
126 130
127struct rt6_info ip6_null_entry = { 131static struct rt6_info ip6_null_entry_template = {
128 .u = { 132 .u = {
129 .dst = { 133 .dst = {
130 .__refcnt = ATOMIC_INIT(1), 134 .__refcnt = ATOMIC_INIT(1),
@@ -134,8 +138,6 @@ struct rt6_info ip6_null_entry = {
134 .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, 138 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
135 .input = ip6_pkt_discard, 139 .input = ip6_pkt_discard,
136 .output = ip6_pkt_discard_out, 140 .output = ip6_pkt_discard_out,
137 .ops = &ip6_dst_ops,
138 .path = (struct dst_entry*)&ip6_null_entry,
139 } 141 }
140 }, 142 },
141 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), 143 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
@@ -148,7 +150,7 @@ struct rt6_info ip6_null_entry = {
148static int ip6_pkt_prohibit(struct sk_buff *skb); 150static int ip6_pkt_prohibit(struct sk_buff *skb);
149static int ip6_pkt_prohibit_out(struct sk_buff *skb); 151static int ip6_pkt_prohibit_out(struct sk_buff *skb);
150 152
151struct rt6_info ip6_prohibit_entry = { 153struct rt6_info ip6_prohibit_entry_template = {
152 .u = { 154 .u = {
153 .dst = { 155 .dst = {
154 .__refcnt = ATOMIC_INIT(1), 156 .__refcnt = ATOMIC_INIT(1),
@@ -158,8 +160,6 @@ struct rt6_info ip6_prohibit_entry = {
158 .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, 160 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
159 .input = ip6_pkt_prohibit, 161 .input = ip6_pkt_prohibit,
160 .output = ip6_pkt_prohibit_out, 162 .output = ip6_pkt_prohibit_out,
161 .ops = &ip6_dst_ops,
162 .path = (struct dst_entry*)&ip6_prohibit_entry,
163 } 163 }
164 }, 164 },
165 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), 165 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
@@ -167,7 +167,7 @@ struct rt6_info ip6_prohibit_entry = {
167 .rt6i_ref = ATOMIC_INIT(1), 167 .rt6i_ref = ATOMIC_INIT(1),
168}; 168};
169 169
170struct rt6_info ip6_blk_hole_entry = { 170static struct rt6_info ip6_blk_hole_entry_template = {
171 .u = { 171 .u = {
172 .dst = { 172 .dst = {
173 .__refcnt = ATOMIC_INIT(1), 173 .__refcnt = ATOMIC_INIT(1),
@@ -177,8 +177,6 @@ struct rt6_info ip6_blk_hole_entry = {
177 .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, 177 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
178 .input = dst_discard, 178 .input = dst_discard,
179 .output = dst_discard, 179 .output = dst_discard,
180 .ops = &ip6_dst_ops,
181 .path = (struct dst_entry*)&ip6_blk_hole_entry,
182 } 180 }
183 }, 181 },
184 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), 182 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
@@ -189,9 +187,9 @@ struct rt6_info ip6_blk_hole_entry = {
189#endif 187#endif
190 188
191/* allocate dst with ip6_dst_ops */ 189/* allocate dst with ip6_dst_ops */
192static __inline__ struct rt6_info *ip6_dst_alloc(void) 190static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops)
193{ 191{
194 return (struct rt6_info *)dst_alloc(&ip6_dst_ops); 192 return (struct rt6_info *)dst_alloc(ops);
195} 193}
196 194
197static void ip6_dst_destroy(struct dst_entry *dst) 195static void ip6_dst_destroy(struct dst_entry *dst)
@@ -211,7 +209,7 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
211 struct rt6_info *rt = (struct rt6_info *)dst; 209 struct rt6_info *rt = (struct rt6_info *)dst;
212 struct inet6_dev *idev = rt->rt6i_idev; 210 struct inet6_dev *idev = rt->rt6i_idev;
213 struct net_device *loopback_dev = 211 struct net_device *loopback_dev =
214 dev->nd_net->loopback_dev; 212 dev_net(dev)->loopback_dev;
215 213
216 if (dev != loopback_dev && idev != NULL && idev->dev == dev) { 214 if (dev != loopback_dev && idev != NULL && idev->dev == dev) {
217 struct inet6_dev *loopback_idev = 215 struct inet6_dev *loopback_idev =
@@ -239,7 +237,8 @@ static inline int rt6_need_strict(struct in6_addr *daddr)
239 * Route lookup. Any table->tb6_lock is implied. 237 * Route lookup. Any table->tb6_lock is implied.
240 */ 238 */
241 239
242static __inline__ struct rt6_info *rt6_device_match(struct rt6_info *rt, 240static inline struct rt6_info *rt6_device_match(struct net *net,
241 struct rt6_info *rt,
243 int oif, 242 int oif,
244 int strict) 243 int strict)
245{ 244{
@@ -268,7 +267,7 @@ static __inline__ struct rt6_info *rt6_device_match(struct rt6_info *rt,
268 return local; 267 return local;
269 268
270 if (strict) 269 if (strict)
271 return &ip6_null_entry; 270 return net->ipv6.ip6_null_entry;
272 } 271 }
273 return rt; 272 return rt;
274} 273}
@@ -409,9 +408,10 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
409static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict) 408static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
410{ 409{
411 struct rt6_info *match, *rt0; 410 struct rt6_info *match, *rt0;
411 struct net *net;
412 412
413 RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n", 413 RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n",
414 __FUNCTION__, fn->leaf, oif); 414 __func__, fn->leaf, oif);
415 415
416 rt0 = fn->rr_ptr; 416 rt0 = fn->rr_ptr;
417 if (!rt0) 417 if (!rt0)
@@ -432,15 +432,17 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
432 } 432 }
433 433
434 RT6_TRACE("%s() => %p\n", 434 RT6_TRACE("%s() => %p\n",
435 __FUNCTION__, match); 435 __func__, match);
436 436
437 return (match ? match : &ip6_null_entry); 437 net = dev_net(rt0->rt6i_dev);
438 return (match ? match : net->ipv6.ip6_null_entry);
438} 439}
439 440
440#ifdef CONFIG_IPV6_ROUTE_INFO 441#ifdef CONFIG_IPV6_ROUTE_INFO
441int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, 442int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
442 struct in6_addr *gwaddr) 443 struct in6_addr *gwaddr)
443{ 444{
445 struct net *net = dev_net(dev);
444 struct route_info *rinfo = (struct route_info *) opt; 446 struct route_info *rinfo = (struct route_info *) opt;
445 struct in6_addr prefix_buf, *prefix; 447 struct in6_addr prefix_buf, *prefix;
446 unsigned int pref; 448 unsigned int pref;
@@ -488,7 +490,8 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
488 prefix = &prefix_buf; 490 prefix = &prefix_buf;
489 } 491 }
490 492
491 rt = rt6_get_route_info(prefix, rinfo->prefix_len, gwaddr, dev->ifindex); 493 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
494 dev->ifindex);
492 495
493 if (rt && !lifetime) { 496 if (rt && !lifetime) {
494 ip6_del_rt(rt); 497 ip6_del_rt(rt);
@@ -496,7 +499,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
496 } 499 }
497 500
498 if (!rt && lifetime) 501 if (!rt && lifetime)
499 rt = rt6_add_route_info(prefix, rinfo->prefix_len, gwaddr, dev->ifindex, 502 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
500 pref); 503 pref);
501 else if (rt) 504 else if (rt)
502 rt->rt6i_flags = RTF_ROUTEINFO | 505 rt->rt6i_flags = RTF_ROUTEINFO |
@@ -515,9 +518,9 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
515} 518}
516#endif 519#endif
517 520
518#define BACKTRACK(saddr) \ 521#define BACKTRACK(__net, saddr) \
519do { \ 522do { \
520 if (rt == &ip6_null_entry) { \ 523 if (rt == __net->ipv6.ip6_null_entry) { \
521 struct fib6_node *pn; \ 524 struct fib6_node *pn; \
522 while (1) { \ 525 while (1) { \
523 if (fn->fn_flags & RTN_TL_ROOT) \ 526 if (fn->fn_flags & RTN_TL_ROOT) \
@@ -533,7 +536,8 @@ do { \
533 } \ 536 } \
534} while(0) 537} while(0)
535 538
536static struct rt6_info *ip6_pol_route_lookup(struct fib6_table *table, 539static struct rt6_info *ip6_pol_route_lookup(struct net *net,
540 struct fib6_table *table,
537 struct flowi *fl, int flags) 541 struct flowi *fl, int flags)
538{ 542{
539 struct fib6_node *fn; 543 struct fib6_node *fn;
@@ -543,8 +547,8 @@ static struct rt6_info *ip6_pol_route_lookup(struct fib6_table *table,
543 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src); 547 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
544restart: 548restart:
545 rt = fn->leaf; 549 rt = fn->leaf;
546 rt = rt6_device_match(rt, fl->oif, flags); 550 rt = rt6_device_match(net, rt, fl->oif, flags);
547 BACKTRACK(&fl->fl6_src); 551 BACKTRACK(net, &fl->fl6_src);
548out: 552out:
549 dst_use(&rt->u.dst, jiffies); 553 dst_use(&rt->u.dst, jiffies);
550 read_unlock_bh(&table->tb6_lock); 554 read_unlock_bh(&table->tb6_lock);
@@ -552,8 +556,8 @@ out:
552 556
553} 557}
554 558
555struct rt6_info *rt6_lookup(struct in6_addr *daddr, struct in6_addr *saddr, 559struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
556 int oif, int strict) 560 const struct in6_addr *saddr, int oif, int strict)
557{ 561{
558 struct flowi fl = { 562 struct flowi fl = {
559 .oif = oif, 563 .oif = oif,
@@ -571,7 +575,7 @@ struct rt6_info *rt6_lookup(struct in6_addr *daddr, struct in6_addr *saddr,
571 flags |= RT6_LOOKUP_F_HAS_SADDR; 575 flags |= RT6_LOOKUP_F_HAS_SADDR;
572 } 576 }
573 577
574 dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_lookup); 578 dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_lookup);
575 if (dst->error == 0) 579 if (dst->error == 0)
576 return (struct rt6_info *) dst; 580 return (struct rt6_info *) dst;
577 581
@@ -604,7 +608,7 @@ static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
604int ip6_ins_rt(struct rt6_info *rt) 608int ip6_ins_rt(struct rt6_info *rt)
605{ 609{
606 struct nl_info info = { 610 struct nl_info info = {
607 .nl_net = &init_net, 611 .nl_net = dev_net(rt->rt6i_dev),
608 }; 612 };
609 return __ip6_ins_rt(rt, &info); 613 return __ip6_ins_rt(rt, &info);
610} 614}
@@ -660,8 +664,8 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *d
660 return rt; 664 return rt;
661} 665}
662 666
663static struct rt6_info *ip6_pol_route(struct fib6_table *table, int oif, 667static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
664 struct flowi *fl, int flags) 668 struct flowi *fl, int flags)
665{ 669{
666 struct fib6_node *fn; 670 struct fib6_node *fn;
667 struct rt6_info *rt, *nrt; 671 struct rt6_info *rt, *nrt;
@@ -680,8 +684,9 @@ restart_2:
680 684
681restart: 685restart:
682 rt = rt6_select(fn, oif, strict | reachable); 686 rt = rt6_select(fn, oif, strict | reachable);
683 BACKTRACK(&fl->fl6_src); 687
684 if (rt == &ip6_null_entry || 688 BACKTRACK(net, &fl->fl6_src);
689 if (rt == net->ipv6.ip6_null_entry ||
685 rt->rt6i_flags & RTF_CACHE) 690 rt->rt6i_flags & RTF_CACHE)
686 goto out; 691 goto out;
687 692
@@ -699,7 +704,7 @@ restart:
699 } 704 }
700 705
701 dst_release(&rt->u.dst); 706 dst_release(&rt->u.dst);
702 rt = nrt ? : &ip6_null_entry; 707 rt = nrt ? : net->ipv6.ip6_null_entry;
703 708
704 dst_hold(&rt->u.dst); 709 dst_hold(&rt->u.dst);
705 if (nrt) { 710 if (nrt) {
@@ -732,15 +737,16 @@ out2:
732 return rt; 737 return rt;
733} 738}
734 739
735static struct rt6_info *ip6_pol_route_input(struct fib6_table *table, 740static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
736 struct flowi *fl, int flags) 741 struct flowi *fl, int flags)
737{ 742{
738 return ip6_pol_route(table, fl->iif, fl, flags); 743 return ip6_pol_route(net, table, fl->iif, fl, flags);
739} 744}
740 745
741void ip6_route_input(struct sk_buff *skb) 746void ip6_route_input(struct sk_buff *skb)
742{ 747{
743 struct ipv6hdr *iph = ipv6_hdr(skb); 748 struct ipv6hdr *iph = ipv6_hdr(skb);
749 struct net *net = dev_net(skb->dev);
744 int flags = RT6_LOOKUP_F_HAS_SADDR; 750 int flags = RT6_LOOKUP_F_HAS_SADDR;
745 struct flowi fl = { 751 struct flowi fl = {
746 .iif = skb->dev->ifindex, 752 .iif = skb->dev->ifindex,
@@ -758,16 +764,17 @@ void ip6_route_input(struct sk_buff *skb)
758 if (rt6_need_strict(&iph->daddr)) 764 if (rt6_need_strict(&iph->daddr))
759 flags |= RT6_LOOKUP_F_IFACE; 765 flags |= RT6_LOOKUP_F_IFACE;
760 766
761 skb->dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_input); 767 skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input);
762} 768}
763 769
764static struct rt6_info *ip6_pol_route_output(struct fib6_table *table, 770static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
765 struct flowi *fl, int flags) 771 struct flowi *fl, int flags)
766{ 772{
767 return ip6_pol_route(table, fl->oif, fl, flags); 773 return ip6_pol_route(net, table, fl->oif, fl, flags);
768} 774}
769 775
770struct dst_entry * ip6_route_output(struct sock *sk, struct flowi *fl) 776struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
777 struct flowi *fl)
771{ 778{
772 int flags = 0; 779 int flags = 0;
773 780
@@ -776,8 +783,17 @@ struct dst_entry * ip6_route_output(struct sock *sk, struct flowi *fl)
776 783
777 if (!ipv6_addr_any(&fl->fl6_src)) 784 if (!ipv6_addr_any(&fl->fl6_src))
778 flags |= RT6_LOOKUP_F_HAS_SADDR; 785 flags |= RT6_LOOKUP_F_HAS_SADDR;
786 else if (sk) {
787 unsigned int prefs = inet6_sk(sk)->srcprefs;
788 if (prefs & IPV6_PREFER_SRC_TMP)
789 flags |= RT6_LOOKUP_F_SRCPREF_TMP;
790 if (prefs & IPV6_PREFER_SRC_PUBLIC)
791 flags |= RT6_LOOKUP_F_SRCPREF_PUBLIC;
792 if (prefs & IPV6_PREFER_SRC_COA)
793 flags |= RT6_LOOKUP_F_SRCPREF_COA;
794 }
779 795
780 return fib6_rule_lookup(fl, flags, ip6_pol_route_output); 796 return fib6_rule_lookup(net, fl, flags, ip6_pol_route_output);
781} 797}
782 798
783EXPORT_SYMBOL(ip6_route_output); 799EXPORT_SYMBOL(ip6_route_output);
@@ -886,12 +902,12 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
886 902
887static int ipv6_get_mtu(struct net_device *dev); 903static int ipv6_get_mtu(struct net_device *dev);
888 904
889static inline unsigned int ipv6_advmss(unsigned int mtu) 905static inline unsigned int ipv6_advmss(struct net *net, unsigned int mtu)
890{ 906{
891 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr); 907 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
892 908
893 if (mtu < init_net.ipv6.sysctl.ip6_rt_min_advmss) 909 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
894 mtu = init_net.ipv6.sysctl.ip6_rt_min_advmss; 910 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
895 911
896 /* 912 /*
897 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and 913 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
@@ -904,21 +920,21 @@ static inline unsigned int ipv6_advmss(unsigned int mtu)
904 return mtu; 920 return mtu;
905} 921}
906 922
907static struct dst_entry *ndisc_dst_gc_list; 923static struct dst_entry *icmp6_dst_gc_list;
908static DEFINE_SPINLOCK(ndisc_lock); 924static DEFINE_SPINLOCK(icmp6_dst_lock);
909 925
910struct dst_entry *ndisc_dst_alloc(struct net_device *dev, 926struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
911 struct neighbour *neigh, 927 struct neighbour *neigh,
912 struct in6_addr *addr, 928 const struct in6_addr *addr)
913 int (*output)(struct sk_buff *))
914{ 929{
915 struct rt6_info *rt; 930 struct rt6_info *rt;
916 struct inet6_dev *idev = in6_dev_get(dev); 931 struct inet6_dev *idev = in6_dev_get(dev);
932 struct net *net = dev_net(dev);
917 933
918 if (unlikely(idev == NULL)) 934 if (unlikely(idev == NULL))
919 return NULL; 935 return NULL;
920 936
921 rt = ip6_dst_alloc(); 937 rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
922 if (unlikely(rt == NULL)) { 938 if (unlikely(rt == NULL)) {
923 in6_dev_put(idev); 939 in6_dev_put(idev);
924 goto out; 940 goto out;
@@ -936,8 +952,8 @@ struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
936 atomic_set(&rt->u.dst.__refcnt, 1); 952 atomic_set(&rt->u.dst.__refcnt, 1);
937 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255; 953 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255;
938 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev); 954 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
939 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst)); 955 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
940 rt->u.dst.output = output; 956 rt->u.dst.output = ip6_output;
941 957
942#if 0 /* there's no chance to use these for ndisc */ 958#if 0 /* there's no chance to use these for ndisc */
943 rt->u.dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST 959 rt->u.dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
@@ -947,18 +963,18 @@ struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
947 rt->rt6i_dst.plen = 128; 963 rt->rt6i_dst.plen = 128;
948#endif 964#endif
949 965
950 spin_lock_bh(&ndisc_lock); 966 spin_lock_bh(&icmp6_dst_lock);
951 rt->u.dst.next = ndisc_dst_gc_list; 967 rt->u.dst.next = icmp6_dst_gc_list;
952 ndisc_dst_gc_list = &rt->u.dst; 968 icmp6_dst_gc_list = &rt->u.dst;
953 spin_unlock_bh(&ndisc_lock); 969 spin_unlock_bh(&icmp6_dst_lock);
954 970
955 fib6_force_start_gc(); 971 fib6_force_start_gc(net);
956 972
957out: 973out:
958 return &rt->u.dst; 974 return &rt->u.dst;
959} 975}
960 976
961int ndisc_dst_gc(int *more) 977int icmp6_dst_gc(int *more)
962{ 978{
963 struct dst_entry *dst, *next, **pprev; 979 struct dst_entry *dst, *next, **pprev;
964 int freed; 980 int freed;
@@ -966,8 +982,8 @@ int ndisc_dst_gc(int *more)
966 next = NULL; 982 next = NULL;
967 freed = 0; 983 freed = 0;
968 984
969 spin_lock_bh(&ndisc_lock); 985 spin_lock_bh(&icmp6_dst_lock);
970 pprev = &ndisc_dst_gc_list; 986 pprev = &icmp6_dst_gc_list;
971 987
972 while ((dst = *pprev) != NULL) { 988 while ((dst = *pprev) != NULL) {
973 if (!atomic_read(&dst->__refcnt)) { 989 if (!atomic_read(&dst->__refcnt)) {
@@ -980,30 +996,33 @@ int ndisc_dst_gc(int *more)
980 } 996 }
981 } 997 }
982 998
983 spin_unlock_bh(&ndisc_lock); 999 spin_unlock_bh(&icmp6_dst_lock);
984 1000
985 return freed; 1001 return freed;
986} 1002}
987 1003
988static int ip6_dst_gc(struct dst_ops *ops) 1004static int ip6_dst_gc(struct dst_ops *ops)
989{ 1005{
990 static unsigned expire = 30*HZ;
991 static unsigned long last_gc;
992 unsigned long now = jiffies; 1006 unsigned long now = jiffies;
993 1007 struct net *net = ops->dst_net;
994 if (time_after(last_gc + init_net.ipv6.sysctl.ip6_rt_gc_min_interval, now) && 1008 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
995 atomic_read(&ip6_dst_ops.entries) <= init_net.ipv6.sysctl.ip6_rt_max_size) 1009 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1010 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1011 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1012 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1013
1014 if (time_after(rt_last_gc + rt_min_interval, now) &&
1015 atomic_read(&ops->entries) <= rt_max_size)
996 goto out; 1016 goto out;
997 1017
998 expire++; 1018 net->ipv6.ip6_rt_gc_expire++;
999 fib6_run_gc(expire); 1019 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
1000 last_gc = now; 1020 net->ipv6.ip6_rt_last_gc = now;
1001 if (atomic_read(&ip6_dst_ops.entries) < ip6_dst_ops.gc_thresh) 1021 if (atomic_read(&ops->entries) < ops->gc_thresh)
1002 expire = init_net.ipv6.sysctl.ip6_rt_gc_timeout>>1; 1022 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1003
1004out: 1023out:
1005 expire -= expire>>init_net.ipv6.sysctl.ip6_rt_gc_elasticity; 1024 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1006 return (atomic_read(&ip6_dst_ops.entries) > init_net.ipv6.sysctl.ip6_rt_max_size); 1025 return (atomic_read(&ops->entries) > rt_max_size);
1007} 1026}
1008 1027
1009/* Clean host part of a prefix. Not necessary in radix tree, 1028/* Clean host part of a prefix. Not necessary in radix tree,
@@ -1025,15 +1044,17 @@ static int ipv6_get_mtu(struct net_device *dev)
1025 return mtu; 1044 return mtu;
1026} 1045}
1027 1046
1028int ipv6_get_hoplimit(struct net_device *dev) 1047int ip6_dst_hoplimit(struct dst_entry *dst)
1029{ 1048{
1030 int hoplimit = ipv6_devconf.hop_limit; 1049 int hoplimit = dst_metric(dst, RTAX_HOPLIMIT);
1031 struct inet6_dev *idev; 1050 if (hoplimit < 0) {
1032 1051 struct net_device *dev = dst->dev;
1033 idev = in6_dev_get(dev); 1052 struct inet6_dev *idev = in6_dev_get(dev);
1034 if (idev) { 1053 if (idev) {
1035 hoplimit = idev->cnf.hop_limit; 1054 hoplimit = idev->cnf.hop_limit;
1036 in6_dev_put(idev); 1055 in6_dev_put(idev);
1056 } else
1057 hoplimit = ipv6_devconf.hop_limit;
1037 } 1058 }
1038 return hoplimit; 1059 return hoplimit;
1039} 1060}
@@ -1045,6 +1066,7 @@ int ipv6_get_hoplimit(struct net_device *dev)
1045int ip6_route_add(struct fib6_config *cfg) 1066int ip6_route_add(struct fib6_config *cfg)
1046{ 1067{
1047 int err; 1068 int err;
1069 struct net *net = cfg->fc_nlinfo.nl_net;
1048 struct rt6_info *rt = NULL; 1070 struct rt6_info *rt = NULL;
1049 struct net_device *dev = NULL; 1071 struct net_device *dev = NULL;
1050 struct inet6_dev *idev = NULL; 1072 struct inet6_dev *idev = NULL;
@@ -1059,7 +1081,7 @@ int ip6_route_add(struct fib6_config *cfg)
1059#endif 1081#endif
1060 if (cfg->fc_ifindex) { 1082 if (cfg->fc_ifindex) {
1061 err = -ENODEV; 1083 err = -ENODEV;
1062 dev = dev_get_by_index(&init_net, cfg->fc_ifindex); 1084 dev = dev_get_by_index(net, cfg->fc_ifindex);
1063 if (!dev) 1085 if (!dev)
1064 goto out; 1086 goto out;
1065 idev = in6_dev_get(dev); 1087 idev = in6_dev_get(dev);
@@ -1070,13 +1092,13 @@ int ip6_route_add(struct fib6_config *cfg)
1070 if (cfg->fc_metric == 0) 1092 if (cfg->fc_metric == 0)
1071 cfg->fc_metric = IP6_RT_PRIO_USER; 1093 cfg->fc_metric = IP6_RT_PRIO_USER;
1072 1094
1073 table = fib6_new_table(cfg->fc_table); 1095 table = fib6_new_table(net, cfg->fc_table);
1074 if (table == NULL) { 1096 if (table == NULL) {
1075 err = -ENOBUFS; 1097 err = -ENOBUFS;
1076 goto out; 1098 goto out;
1077 } 1099 }
1078 1100
1079 rt = ip6_dst_alloc(); 1101 rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
1080 1102
1081 if (rt == NULL) { 1103 if (rt == NULL) {
1082 err = -ENOMEM; 1104 err = -ENOMEM;
@@ -1117,12 +1139,12 @@ int ip6_route_add(struct fib6_config *cfg)
1117 if ((cfg->fc_flags & RTF_REJECT) || 1139 if ((cfg->fc_flags & RTF_REJECT) ||
1118 (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK))) { 1140 (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK))) {
1119 /* hold loopback dev/idev if we haven't done so. */ 1141 /* hold loopback dev/idev if we haven't done so. */
1120 if (dev != init_net.loopback_dev) { 1142 if (dev != net->loopback_dev) {
1121 if (dev) { 1143 if (dev) {
1122 dev_put(dev); 1144 dev_put(dev);
1123 in6_dev_put(idev); 1145 in6_dev_put(idev);
1124 } 1146 }
1125 dev = init_net.loopback_dev; 1147 dev = net->loopback_dev;
1126 dev_hold(dev); 1148 dev_hold(dev);
1127 idev = in6_dev_get(dev); 1149 idev = in6_dev_get(dev);
1128 if (!idev) { 1150 if (!idev) {
@@ -1159,7 +1181,7 @@ int ip6_route_add(struct fib6_config *cfg)
1159 if (!(gwa_type&IPV6_ADDR_UNICAST)) 1181 if (!(gwa_type&IPV6_ADDR_UNICAST))
1160 goto out; 1182 goto out;
1161 1183
1162 grt = rt6_lookup(gw_addr, NULL, cfg->fc_ifindex, 1); 1184 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1163 1185
1164 err = -EHOSTUNREACH; 1186 err = -EHOSTUNREACH;
1165 if (grt == NULL) 1187 if (grt == NULL)
@@ -1226,10 +1248,13 @@ install_route:
1226 if (!rt->u.dst.metrics[RTAX_MTU-1]) 1248 if (!rt->u.dst.metrics[RTAX_MTU-1])
1227 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev); 1249 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
1228 if (!rt->u.dst.metrics[RTAX_ADVMSS-1]) 1250 if (!rt->u.dst.metrics[RTAX_ADVMSS-1])
1229 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst)); 1251 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
1230 rt->u.dst.dev = dev; 1252 rt->u.dst.dev = dev;
1231 rt->rt6i_idev = idev; 1253 rt->rt6i_idev = idev;
1232 rt->rt6i_table = table; 1254 rt->rt6i_table = table;
1255
1256 cfg->fc_nlinfo.nl_net = dev_net(dev);
1257
1233 return __ip6_ins_rt(rt, &cfg->fc_nlinfo); 1258 return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
1234 1259
1235out: 1260out:
@@ -1246,8 +1271,9 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1246{ 1271{
1247 int err; 1272 int err;
1248 struct fib6_table *table; 1273 struct fib6_table *table;
1274 struct net *net = dev_net(rt->rt6i_dev);
1249 1275
1250 if (rt == &ip6_null_entry) 1276 if (rt == net->ipv6.ip6_null_entry)
1251 return -ENOENT; 1277 return -ENOENT;
1252 1278
1253 table = rt->rt6i_table; 1279 table = rt->rt6i_table;
@@ -1264,7 +1290,7 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1264int ip6_del_rt(struct rt6_info *rt) 1290int ip6_del_rt(struct rt6_info *rt)
1265{ 1291{
1266 struct nl_info info = { 1292 struct nl_info info = {
1267 .nl_net = &init_net, 1293 .nl_net = dev_net(rt->rt6i_dev),
1268 }; 1294 };
1269 return __ip6_del_rt(rt, &info); 1295 return __ip6_del_rt(rt, &info);
1270} 1296}
@@ -1276,7 +1302,7 @@ static int ip6_route_del(struct fib6_config *cfg)
1276 struct rt6_info *rt; 1302 struct rt6_info *rt;
1277 int err = -ESRCH; 1303 int err = -ESRCH;
1278 1304
1279 table = fib6_get_table(cfg->fc_table); 1305 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
1280 if (table == NULL) 1306 if (table == NULL)
1281 return err; 1307 return err;
1282 1308
@@ -1316,7 +1342,8 @@ struct ip6rd_flowi {
1316 struct in6_addr gateway; 1342 struct in6_addr gateway;
1317}; 1343};
1318 1344
1319static struct rt6_info *__ip6_route_redirect(struct fib6_table *table, 1345static struct rt6_info *__ip6_route_redirect(struct net *net,
1346 struct fib6_table *table,
1320 struct flowi *fl, 1347 struct flowi *fl,
1321 int flags) 1348 int flags)
1322{ 1349{
@@ -1359,8 +1386,8 @@ restart:
1359 } 1386 }
1360 1387
1361 if (!rt) 1388 if (!rt)
1362 rt = &ip6_null_entry; 1389 rt = net->ipv6.ip6_null_entry;
1363 BACKTRACK(&fl->fl6_src); 1390 BACKTRACK(net, &fl->fl6_src);
1364out: 1391out:
1365 dst_hold(&rt->u.dst); 1392 dst_hold(&rt->u.dst);
1366 1393
@@ -1375,6 +1402,7 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
1375 struct net_device *dev) 1402 struct net_device *dev)
1376{ 1403{
1377 int flags = RT6_LOOKUP_F_HAS_SADDR; 1404 int flags = RT6_LOOKUP_F_HAS_SADDR;
1405 struct net *net = dev_net(dev);
1378 struct ip6rd_flowi rdfl = { 1406 struct ip6rd_flowi rdfl = {
1379 .fl = { 1407 .fl = {
1380 .oif = dev->ifindex, 1408 .oif = dev->ifindex,
@@ -1391,7 +1419,8 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
1391 if (rt6_need_strict(dest)) 1419 if (rt6_need_strict(dest))
1392 flags |= RT6_LOOKUP_F_IFACE; 1420 flags |= RT6_LOOKUP_F_IFACE;
1393 1421
1394 return (struct rt6_info *)fib6_rule_lookup((struct flowi *)&rdfl, flags, __ip6_route_redirect); 1422 return (struct rt6_info *)fib6_rule_lookup(net, (struct flowi *)&rdfl,
1423 flags, __ip6_route_redirect);
1395} 1424}
1396 1425
1397void rt6_redirect(struct in6_addr *dest, struct in6_addr *src, 1426void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
@@ -1400,10 +1429,11 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
1400{ 1429{
1401 struct rt6_info *rt, *nrt = NULL; 1430 struct rt6_info *rt, *nrt = NULL;
1402 struct netevent_redirect netevent; 1431 struct netevent_redirect netevent;
1432 struct net *net = dev_net(neigh->dev);
1403 1433
1404 rt = ip6_route_redirect(dest, src, saddr, neigh->dev); 1434 rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
1405 1435
1406 if (rt == &ip6_null_entry) { 1436 if (rt == net->ipv6.ip6_null_entry) {
1407 if (net_ratelimit()) 1437 if (net_ratelimit())
1408 printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop " 1438 printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
1409 "for redirect target\n"); 1439 "for redirect target\n");
@@ -1448,7 +1478,8 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
1448 nrt->rt6i_nexthop = neigh_clone(neigh); 1478 nrt->rt6i_nexthop = neigh_clone(neigh);
1449 /* Reset pmtu, it may be better */ 1479 /* Reset pmtu, it may be better */
1450 nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev); 1480 nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
1451 nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&nrt->u.dst)); 1481 nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dev_net(neigh->dev),
1482 dst_mtu(&nrt->u.dst));
1452 1483
1453 if (ip6_ins_rt(nrt)) 1484 if (ip6_ins_rt(nrt))
1454 goto out; 1485 goto out;
@@ -1476,9 +1507,10 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1476 struct net_device *dev, u32 pmtu) 1507 struct net_device *dev, u32 pmtu)
1477{ 1508{
1478 struct rt6_info *rt, *nrt; 1509 struct rt6_info *rt, *nrt;
1510 struct net *net = dev_net(dev);
1479 int allfrag = 0; 1511 int allfrag = 0;
1480 1512
1481 rt = rt6_lookup(daddr, saddr, dev->ifindex, 0); 1513 rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0);
1482 if (rt == NULL) 1514 if (rt == NULL)
1483 return; 1515 return;
1484 1516
@@ -1511,7 +1543,7 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1511 rt->u.dst.metrics[RTAX_MTU-1] = pmtu; 1543 rt->u.dst.metrics[RTAX_MTU-1] = pmtu;
1512 if (allfrag) 1544 if (allfrag)
1513 rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG; 1545 rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1514 dst_set_expires(&rt->u.dst, init_net.ipv6.sysctl.ip6_rt_mtu_expires); 1546 dst_set_expires(&rt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1515 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES; 1547 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
1516 goto out; 1548 goto out;
1517 } 1549 }
@@ -1537,7 +1569,7 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1537 * which is 10 mins. After 10 mins the decreased pmtu is expired 1569 * which is 10 mins. After 10 mins the decreased pmtu is expired
1538 * and detecting PMTU increase will be automatically happened. 1570 * and detecting PMTU increase will be automatically happened.
1539 */ 1571 */
1540 dst_set_expires(&nrt->u.dst, init_net.ipv6.sysctl.ip6_rt_mtu_expires); 1572 dst_set_expires(&nrt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1541 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES; 1573 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
1542 1574
1543 ip6_ins_rt(nrt); 1575 ip6_ins_rt(nrt);
@@ -1552,7 +1584,8 @@ out:
1552 1584
1553static struct rt6_info * ip6_rt_copy(struct rt6_info *ort) 1585static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1554{ 1586{
1555 struct rt6_info *rt = ip6_dst_alloc(); 1587 struct net *net = dev_net(ort->rt6i_dev);
1588 struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
1556 1589
1557 if (rt) { 1590 if (rt) {
1558 rt->u.dst.input = ort->u.dst.input; 1591 rt->u.dst.input = ort->u.dst.input;
@@ -1583,14 +1616,15 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1583} 1616}
1584 1617
1585#ifdef CONFIG_IPV6_ROUTE_INFO 1618#ifdef CONFIG_IPV6_ROUTE_INFO
1586static struct rt6_info *rt6_get_route_info(struct in6_addr *prefix, int prefixlen, 1619static struct rt6_info *rt6_get_route_info(struct net *net,
1620 struct in6_addr *prefix, int prefixlen,
1587 struct in6_addr *gwaddr, int ifindex) 1621 struct in6_addr *gwaddr, int ifindex)
1588{ 1622{
1589 struct fib6_node *fn; 1623 struct fib6_node *fn;
1590 struct rt6_info *rt = NULL; 1624 struct rt6_info *rt = NULL;
1591 struct fib6_table *table; 1625 struct fib6_table *table;
1592 1626
1593 table = fib6_get_table(RT6_TABLE_INFO); 1627 table = fib6_get_table(net, RT6_TABLE_INFO);
1594 if (table == NULL) 1628 if (table == NULL)
1595 return NULL; 1629 return NULL;
1596 1630
@@ -1614,7 +1648,8 @@ out:
1614 return rt; 1648 return rt;
1615} 1649}
1616 1650
1617static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixlen, 1651static struct rt6_info *rt6_add_route_info(struct net *net,
1652 struct in6_addr *prefix, int prefixlen,
1618 struct in6_addr *gwaddr, int ifindex, 1653 struct in6_addr *gwaddr, int ifindex,
1619 unsigned pref) 1654 unsigned pref)
1620{ 1655{
@@ -1625,6 +1660,9 @@ static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixle
1625 .fc_dst_len = prefixlen, 1660 .fc_dst_len = prefixlen,
1626 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | 1661 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1627 RTF_UP | RTF_PREF(pref), 1662 RTF_UP | RTF_PREF(pref),
1663 .fc_nlinfo.pid = 0,
1664 .fc_nlinfo.nlh = NULL,
1665 .fc_nlinfo.nl_net = net,
1628 }; 1666 };
1629 1667
1630 ipv6_addr_copy(&cfg.fc_dst, prefix); 1668 ipv6_addr_copy(&cfg.fc_dst, prefix);
@@ -1636,7 +1674,7 @@ static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixle
1636 1674
1637 ip6_route_add(&cfg); 1675 ip6_route_add(&cfg);
1638 1676
1639 return rt6_get_route_info(prefix, prefixlen, gwaddr, ifindex); 1677 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
1640} 1678}
1641#endif 1679#endif
1642 1680
@@ -1645,7 +1683,7 @@ struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *d
1645 struct rt6_info *rt; 1683 struct rt6_info *rt;
1646 struct fib6_table *table; 1684 struct fib6_table *table;
1647 1685
1648 table = fib6_get_table(RT6_TABLE_DFLT); 1686 table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
1649 if (table == NULL) 1687 if (table == NULL)
1650 return NULL; 1688 return NULL;
1651 1689
@@ -1662,8 +1700,6 @@ struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *d
1662 return rt; 1700 return rt;
1663} 1701}
1664 1702
1665EXPORT_SYMBOL(rt6_get_dflt_router);
1666
1667struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr, 1703struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr,
1668 struct net_device *dev, 1704 struct net_device *dev,
1669 unsigned int pref) 1705 unsigned int pref)
@@ -1674,6 +1710,9 @@ struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr,
1674 .fc_ifindex = dev->ifindex, 1710 .fc_ifindex = dev->ifindex,
1675 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | 1711 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
1676 RTF_UP | RTF_EXPIRES | RTF_PREF(pref), 1712 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
1713 .fc_nlinfo.pid = 0,
1714 .fc_nlinfo.nlh = NULL,
1715 .fc_nlinfo.nl_net = dev_net(dev),
1677 }; 1716 };
1678 1717
1679 ipv6_addr_copy(&cfg.fc_gateway, gwaddr); 1718 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
@@ -1683,13 +1722,13 @@ struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr,
1683 return rt6_get_dflt_router(gwaddr, dev); 1722 return rt6_get_dflt_router(gwaddr, dev);
1684} 1723}
1685 1724
1686void rt6_purge_dflt_routers(void) 1725void rt6_purge_dflt_routers(struct net *net)
1687{ 1726{
1688 struct rt6_info *rt; 1727 struct rt6_info *rt;
1689 struct fib6_table *table; 1728 struct fib6_table *table;
1690 1729
1691 /* NOTE: Keep consistent with rt6_get_dflt_router */ 1730 /* NOTE: Keep consistent with rt6_get_dflt_router */
1692 table = fib6_get_table(RT6_TABLE_DFLT); 1731 table = fib6_get_table(net, RT6_TABLE_DFLT);
1693 if (table == NULL) 1732 if (table == NULL)
1694 return; 1733 return;
1695 1734
@@ -1706,7 +1745,8 @@ restart:
1706 read_unlock_bh(&table->tb6_lock); 1745 read_unlock_bh(&table->tb6_lock);
1707} 1746}
1708 1747
1709static void rtmsg_to_fib6_config(struct in6_rtmsg *rtmsg, 1748static void rtmsg_to_fib6_config(struct net *net,
1749 struct in6_rtmsg *rtmsg,
1710 struct fib6_config *cfg) 1750 struct fib6_config *cfg)
1711{ 1751{
1712 memset(cfg, 0, sizeof(*cfg)); 1752 memset(cfg, 0, sizeof(*cfg));
@@ -1719,14 +1759,14 @@ static void rtmsg_to_fib6_config(struct in6_rtmsg *rtmsg,
1719 cfg->fc_src_len = rtmsg->rtmsg_src_len; 1759 cfg->fc_src_len = rtmsg->rtmsg_src_len;
1720 cfg->fc_flags = rtmsg->rtmsg_flags; 1760 cfg->fc_flags = rtmsg->rtmsg_flags;
1721 1761
1722 cfg->fc_nlinfo.nl_net = &init_net; 1762 cfg->fc_nlinfo.nl_net = net;
1723 1763
1724 ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst); 1764 ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst);
1725 ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src); 1765 ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src);
1726 ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway); 1766 ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway);
1727} 1767}
1728 1768
1729int ipv6_route_ioctl(unsigned int cmd, void __user *arg) 1769int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1730{ 1770{
1731 struct fib6_config cfg; 1771 struct fib6_config cfg;
1732 struct in6_rtmsg rtmsg; 1772 struct in6_rtmsg rtmsg;
@@ -1742,7 +1782,7 @@ int ipv6_route_ioctl(unsigned int cmd, void __user *arg)
1742 if (err) 1782 if (err)
1743 return -EFAULT; 1783 return -EFAULT;
1744 1784
1745 rtmsg_to_fib6_config(&rtmsg, &cfg); 1785 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
1746 1786
1747 rtnl_lock(); 1787 rtnl_lock();
1748 switch (cmd) { 1788 switch (cmd) {
@@ -1821,21 +1861,22 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1821 const struct in6_addr *addr, 1861 const struct in6_addr *addr,
1822 int anycast) 1862 int anycast)
1823{ 1863{
1824 struct rt6_info *rt = ip6_dst_alloc(); 1864 struct net *net = dev_net(idev->dev);
1865 struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
1825 1866
1826 if (rt == NULL) 1867 if (rt == NULL)
1827 return ERR_PTR(-ENOMEM); 1868 return ERR_PTR(-ENOMEM);
1828 1869
1829 dev_hold(init_net.loopback_dev); 1870 dev_hold(net->loopback_dev);
1830 in6_dev_hold(idev); 1871 in6_dev_hold(idev);
1831 1872
1832 rt->u.dst.flags = DST_HOST; 1873 rt->u.dst.flags = DST_HOST;
1833 rt->u.dst.input = ip6_input; 1874 rt->u.dst.input = ip6_input;
1834 rt->u.dst.output = ip6_output; 1875 rt->u.dst.output = ip6_output;
1835 rt->rt6i_dev = init_net.loopback_dev; 1876 rt->rt6i_dev = net->loopback_dev;
1836 rt->rt6i_idev = idev; 1877 rt->rt6i_idev = idev;
1837 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev); 1878 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
1838 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst)); 1879 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
1839 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1; 1880 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
1840 rt->u.dst.obsolete = -1; 1881 rt->u.dst.obsolete = -1;
1841 1882
@@ -1852,26 +1893,39 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1852 1893
1853 ipv6_addr_copy(&rt->rt6i_dst.addr, addr); 1894 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
1854 rt->rt6i_dst.plen = 128; 1895 rt->rt6i_dst.plen = 128;
1855 rt->rt6i_table = fib6_get_table(RT6_TABLE_LOCAL); 1896 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
1856 1897
1857 atomic_set(&rt->u.dst.__refcnt, 1); 1898 atomic_set(&rt->u.dst.__refcnt, 1);
1858 1899
1859 return rt; 1900 return rt;
1860} 1901}
1861 1902
1903struct arg_dev_net {
1904 struct net_device *dev;
1905 struct net *net;
1906};
1907
1862static int fib6_ifdown(struct rt6_info *rt, void *arg) 1908static int fib6_ifdown(struct rt6_info *rt, void *arg)
1863{ 1909{
1864 if (((void*)rt->rt6i_dev == arg || arg == NULL) && 1910 struct net_device *dev = ((struct arg_dev_net *)arg)->dev;
1865 rt != &ip6_null_entry) { 1911 struct net *net = ((struct arg_dev_net *)arg)->net;
1912
1913 if (((void *)rt->rt6i_dev == dev || dev == NULL) &&
1914 rt != net->ipv6.ip6_null_entry) {
1866 RT6_TRACE("deleted by ifdown %p\n", rt); 1915 RT6_TRACE("deleted by ifdown %p\n", rt);
1867 return -1; 1916 return -1;
1868 } 1917 }
1869 return 0; 1918 return 0;
1870} 1919}
1871 1920
1872void rt6_ifdown(struct net_device *dev) 1921void rt6_ifdown(struct net *net, struct net_device *dev)
1873{ 1922{
1874 fib6_clean_all(fib6_ifdown, 0, dev); 1923 struct arg_dev_net adn = {
1924 .dev = dev,
1925 .net = net,
1926 };
1927
1928 fib6_clean_all(net, fib6_ifdown, 0, &adn);
1875} 1929}
1876 1930
1877struct rt6_mtu_change_arg 1931struct rt6_mtu_change_arg
@@ -1884,6 +1938,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
1884{ 1938{
1885 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg; 1939 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
1886 struct inet6_dev *idev; 1940 struct inet6_dev *idev;
1941 struct net *net = dev_net(arg->dev);
1887 1942
1888 /* In IPv6 pmtu discovery is not optional, 1943 /* In IPv6 pmtu discovery is not optional,
1889 so that RTAX_MTU lock cannot disable it. 1944 so that RTAX_MTU lock cannot disable it.
@@ -1915,7 +1970,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
1915 (dst_mtu(&rt->u.dst) < arg->mtu && 1970 (dst_mtu(&rt->u.dst) < arg->mtu &&
1916 dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) { 1971 dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) {
1917 rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu; 1972 rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu;
1918 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(arg->mtu); 1973 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu);
1919 } 1974 }
1920 return 0; 1975 return 0;
1921} 1976}
@@ -1927,7 +1982,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned mtu)
1927 .mtu = mtu, 1982 .mtu = mtu,
1928 }; 1983 };
1929 1984
1930 fib6_clean_all(rt6_mtu_change_route, 0, &arg); 1985 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg);
1931} 1986}
1932 1987
1933static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { 1988static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
@@ -1964,7 +2019,7 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
1964 2019
1965 cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid; 2020 cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
1966 cfg->fc_nlinfo.nlh = nlh; 2021 cfg->fc_nlinfo.nlh = nlh;
1967 cfg->fc_nlinfo.nl_net = skb->sk->sk_net; 2022 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
1968 2023
1969 if (tb[RTA_GATEWAY]) { 2024 if (tb[RTA_GATEWAY]) {
1970 nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16); 2025 nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
@@ -2010,13 +2065,9 @@ errout:
2010 2065
2011static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) 2066static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2012{ 2067{
2013 struct net *net = skb->sk->sk_net;
2014 struct fib6_config cfg; 2068 struct fib6_config cfg;
2015 int err; 2069 int err;
2016 2070
2017 if (net != &init_net)
2018 return -EINVAL;
2019
2020 err = rtm_to_fib6_config(skb, nlh, &cfg); 2071 err = rtm_to_fib6_config(skb, nlh, &cfg);
2021 if (err < 0) 2072 if (err < 0)
2022 return err; 2073 return err;
@@ -2026,13 +2077,9 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *a
2026 2077
2027static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) 2078static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2028{ 2079{
2029 struct net *net = skb->sk->sk_net;
2030 struct fib6_config cfg; 2080 struct fib6_config cfg;
2031 int err; 2081 int err;
2032 2082
2033 if (net != &init_net)
2034 return -EINVAL;
2035
2036 err = rtm_to_fib6_config(skb, nlh, &cfg); 2083 err = rtm_to_fib6_config(skb, nlh, &cfg);
2037 if (err < 0) 2084 if (err < 0)
2038 return err; 2085 return err;
@@ -2058,7 +2105,7 @@ static inline size_t rt6_nlmsg_size(void)
2058static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt, 2105static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
2059 struct in6_addr *dst, struct in6_addr *src, 2106 struct in6_addr *dst, struct in6_addr *src,
2060 int iif, int type, u32 pid, u32 seq, 2107 int iif, int type, u32 pid, u32 seq,
2061 int prefix, unsigned int flags) 2108 int prefix, int nowait, unsigned int flags)
2062{ 2109{
2063 struct rtmsg *rtm; 2110 struct rtmsg *rtm;
2064 struct nlmsghdr *nlh; 2111 struct nlmsghdr *nlh;
@@ -2118,11 +2165,27 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
2118 } else if (rtm->rtm_src_len) 2165 } else if (rtm->rtm_src_len)
2119 NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr); 2166 NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
2120#endif 2167#endif
2121 if (iif) 2168 if (iif) {
2122 NLA_PUT_U32(skb, RTA_IIF, iif); 2169#ifdef CONFIG_IPV6_MROUTE
2123 else if (dst) { 2170 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
2171 int err = ip6mr_get_route(skb, rtm, nowait);
2172 if (err <= 0) {
2173 if (!nowait) {
2174 if (err == 0)
2175 return 0;
2176 goto nla_put_failure;
2177 } else {
2178 if (err == -EMSGSIZE)
2179 goto nla_put_failure;
2180 }
2181 }
2182 } else
2183#endif
2184 NLA_PUT_U32(skb, RTA_IIF, iif);
2185 } else if (dst) {
2124 struct in6_addr saddr_buf; 2186 struct in6_addr saddr_buf;
2125 if (ipv6_get_saddr(&rt->u.dst, dst, &saddr_buf) == 0) 2187 if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev,
2188 dst, 0, &saddr_buf) == 0)
2126 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); 2189 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2127 } 2190 }
2128 2191
@@ -2162,12 +2225,12 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2162 2225
2163 return rt6_fill_node(arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE, 2226 return rt6_fill_node(arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2164 NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq, 2227 NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
2165 prefix, NLM_F_MULTI); 2228 prefix, 0, NLM_F_MULTI);
2166} 2229}
2167 2230
2168static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg) 2231static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
2169{ 2232{
2170 struct net *net = in_skb->sk->sk_net; 2233 struct net *net = sock_net(in_skb->sk);
2171 struct nlattr *tb[RTA_MAX+1]; 2234 struct nlattr *tb[RTA_MAX+1];
2172 struct rt6_info *rt; 2235 struct rt6_info *rt;
2173 struct sk_buff *skb; 2236 struct sk_buff *skb;
@@ -2175,9 +2238,6 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2175 struct flowi fl; 2238 struct flowi fl;
2176 int err, iif = 0; 2239 int err, iif = 0;
2177 2240
2178 if (net != &init_net)
2179 return -EINVAL;
2180
2181 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy); 2241 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2182 if (err < 0) 2242 if (err < 0)
2183 goto errout; 2243 goto errout;
@@ -2207,7 +2267,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2207 2267
2208 if (iif) { 2268 if (iif) {
2209 struct net_device *dev; 2269 struct net_device *dev;
2210 dev = __dev_get_by_index(&init_net, iif); 2270 dev = __dev_get_by_index(net, iif);
2211 if (!dev) { 2271 if (!dev) {
2212 err = -ENODEV; 2272 err = -ENODEV;
2213 goto errout; 2273 goto errout;
@@ -2226,18 +2286,18 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2226 skb_reset_mac_header(skb); 2286 skb_reset_mac_header(skb);
2227 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr)); 2287 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2228 2288
2229 rt = (struct rt6_info*) ip6_route_output(NULL, &fl); 2289 rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl);
2230 skb->dst = &rt->u.dst; 2290 skb->dst = &rt->u.dst;
2231 2291
2232 err = rt6_fill_node(skb, rt, &fl.fl6_dst, &fl.fl6_src, iif, 2292 err = rt6_fill_node(skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
2233 RTM_NEWROUTE, NETLINK_CB(in_skb).pid, 2293 RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
2234 nlh->nlmsg_seq, 0, 0); 2294 nlh->nlmsg_seq, 0, 0, 0);
2235 if (err < 0) { 2295 if (err < 0) {
2236 kfree_skb(skb); 2296 kfree_skb(skb);
2237 goto errout; 2297 goto errout;
2238 } 2298 }
2239 2299
2240 err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid); 2300 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
2241errout: 2301errout:
2242 return err; 2302 return err;
2243} 2303}
@@ -2245,6 +2305,7 @@ errout:
2245void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info) 2305void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2246{ 2306{
2247 struct sk_buff *skb; 2307 struct sk_buff *skb;
2308 struct net *net = info->nl_net;
2248 u32 seq; 2309 u32 seq;
2249 int err; 2310 int err;
2250 2311
@@ -2256,18 +2317,38 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2256 goto errout; 2317 goto errout;
2257 2318
2258 err = rt6_fill_node(skb, rt, NULL, NULL, 0, 2319 err = rt6_fill_node(skb, rt, NULL, NULL, 0,
2259 event, info->pid, seq, 0, 0); 2320 event, info->pid, seq, 0, 0, 0);
2260 if (err < 0) { 2321 if (err < 0) {
2261 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ 2322 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2262 WARN_ON(err == -EMSGSIZE); 2323 WARN_ON(err == -EMSGSIZE);
2263 kfree_skb(skb); 2324 kfree_skb(skb);
2264 goto errout; 2325 goto errout;
2265 } 2326 }
2266 err = rtnl_notify(skb, &init_net, info->pid, 2327 err = rtnl_notify(skb, net, info->pid, RTNLGRP_IPV6_ROUTE,
2267 RTNLGRP_IPV6_ROUTE, info->nlh, gfp_any()); 2328 info->nlh, gfp_any());
2268errout: 2329errout:
2269 if (err < 0) 2330 if (err < 0)
2270 rtnl_set_sk_err(&init_net, RTNLGRP_IPV6_ROUTE, err); 2331 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
2332}
2333
2334static int ip6_route_dev_notify(struct notifier_block *this,
2335 unsigned long event, void *data)
2336{
2337 struct net_device *dev = (struct net_device *)data;
2338 struct net *net = dev_net(dev);
2339
2340 if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
2341 net->ipv6.ip6_null_entry->u.dst.dev = dev;
2342 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
2343#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2344 net->ipv6.ip6_prohibit_entry->u.dst.dev = dev;
2345 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
2346 net->ipv6.ip6_blk_hole_entry->u.dst.dev = dev;
2347 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
2348#endif
2349 }
2350
2351 return NOTIFY_OK;
2271} 2352}
2272 2353
2273/* 2354/*
@@ -2316,13 +2397,33 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2316 2397
2317static int ipv6_route_show(struct seq_file *m, void *v) 2398static int ipv6_route_show(struct seq_file *m, void *v)
2318{ 2399{
2319 fib6_clean_all(rt6_info_route, 0, m); 2400 struct net *net = (struct net *)m->private;
2401 fib6_clean_all(net, rt6_info_route, 0, m);
2320 return 0; 2402 return 0;
2321} 2403}
2322 2404
2323static int ipv6_route_open(struct inode *inode, struct file *file) 2405static int ipv6_route_open(struct inode *inode, struct file *file)
2324{ 2406{
2325 return single_open(file, ipv6_route_show, NULL); 2407 int err;
2408 struct net *net = get_proc_net(inode);
2409 if (!net)
2410 return -ENXIO;
2411
2412 err = single_open(file, ipv6_route_show, net);
2413 if (err < 0) {
2414 put_net(net);
2415 return err;
2416 }
2417
2418 return 0;
2419}
2420
2421static int ipv6_route_release(struct inode *inode, struct file *file)
2422{
2423 struct seq_file *seq = file->private_data;
2424 struct net *net = seq->private;
2425 put_net(net);
2426 return single_release(inode, file);
2326} 2427}
2327 2428
2328static const struct file_operations ipv6_route_proc_fops = { 2429static const struct file_operations ipv6_route_proc_fops = {
@@ -2330,24 +2431,46 @@ static const struct file_operations ipv6_route_proc_fops = {
2330 .open = ipv6_route_open, 2431 .open = ipv6_route_open,
2331 .read = seq_read, 2432 .read = seq_read,
2332 .llseek = seq_lseek, 2433 .llseek = seq_lseek,
2333 .release = single_release, 2434 .release = ipv6_route_release,
2334}; 2435};
2335 2436
2336static int rt6_stats_seq_show(struct seq_file *seq, void *v) 2437static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2337{ 2438{
2439 struct net *net = (struct net *)seq->private;
2338 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n", 2440 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2339 rt6_stats.fib_nodes, rt6_stats.fib_route_nodes, 2441 net->ipv6.rt6_stats->fib_nodes,
2340 rt6_stats.fib_rt_alloc, rt6_stats.fib_rt_entries, 2442 net->ipv6.rt6_stats->fib_route_nodes,
2341 rt6_stats.fib_rt_cache, 2443 net->ipv6.rt6_stats->fib_rt_alloc,
2342 atomic_read(&ip6_dst_ops.entries), 2444 net->ipv6.rt6_stats->fib_rt_entries,
2343 rt6_stats.fib_discarded_routes); 2445 net->ipv6.rt6_stats->fib_rt_cache,
2446 atomic_read(&net->ipv6.ip6_dst_ops->entries),
2447 net->ipv6.rt6_stats->fib_discarded_routes);
2344 2448
2345 return 0; 2449 return 0;
2346} 2450}
2347 2451
2348static int rt6_stats_seq_open(struct inode *inode, struct file *file) 2452static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2349{ 2453{
2350 return single_open(file, rt6_stats_seq_show, NULL); 2454 int err;
2455 struct net *net = get_proc_net(inode);
2456 if (!net)
2457 return -ENXIO;
2458
2459 err = single_open(file, rt6_stats_seq_show, net);
2460 if (err < 0) {
2461 put_net(net);
2462 return err;
2463 }
2464
2465 return 0;
2466}
2467
2468static int rt6_stats_seq_release(struct inode *inode, struct file *file)
2469{
2470 struct seq_file *seq = file->private_data;
2471 struct net *net = (struct net *)seq->private;
2472 put_net(net);
2473 return single_release(inode, file);
2351} 2474}
2352 2475
2353static const struct file_operations rt6_stats_seq_fops = { 2476static const struct file_operations rt6_stats_seq_fops = {
@@ -2355,42 +2478,8 @@ static const struct file_operations rt6_stats_seq_fops = {
2355 .open = rt6_stats_seq_open, 2478 .open = rt6_stats_seq_open,
2356 .read = seq_read, 2479 .read = seq_read,
2357 .llseek = seq_lseek, 2480 .llseek = seq_lseek,
2358 .release = single_release, 2481 .release = rt6_stats_seq_release,
2359}; 2482};
2360
2361static int ipv6_route_proc_init(struct net *net)
2362{
2363 int ret = -ENOMEM;
2364 if (!proc_net_fops_create(net, "ipv6_route",
2365 0, &ipv6_route_proc_fops))
2366 goto out;
2367
2368 if (!proc_net_fops_create(net, "rt6_stats",
2369 S_IRUGO, &rt6_stats_seq_fops))
2370 goto out_ipv6_route;
2371
2372 ret = 0;
2373out:
2374 return ret;
2375out_ipv6_route:
2376 proc_net_remove(net, "ipv6_route");
2377 goto out;
2378}
2379
2380static void ipv6_route_proc_fini(struct net *net)
2381{
2382 proc_net_remove(net, "ipv6_route");
2383 proc_net_remove(net, "rt6_stats");
2384}
2385#else
2386static inline int ipv6_route_proc_init(struct net *net)
2387{
2388 return 0;
2389}
2390static inline void ipv6_route_proc_fini(struct net *net)
2391{
2392 return ;
2393}
2394#endif /* CONFIG_PROC_FS */ 2483#endif /* CONFIG_PROC_FS */
2395 2484
2396#ifdef CONFIG_SYSCTL 2485#ifdef CONFIG_SYSCTL
@@ -2399,10 +2488,11 @@ static
2399int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp, 2488int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp,
2400 void __user *buffer, size_t *lenp, loff_t *ppos) 2489 void __user *buffer, size_t *lenp, loff_t *ppos)
2401{ 2490{
2402 int delay = init_net.ipv6.sysctl.flush_delay; 2491 struct net *net = current->nsproxy->net_ns;
2492 int delay = net->ipv6.sysctl.flush_delay;
2403 if (write) { 2493 if (write) {
2404 proc_dointvec(ctl, write, filp, buffer, lenp, ppos); 2494 proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
2405 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay); 2495 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2406 return 0; 2496 return 0;
2407 } else 2497 } else
2408 return -EINVAL; 2498 return -EINVAL;
@@ -2419,7 +2509,7 @@ ctl_table ipv6_route_table_template[] = {
2419 { 2509 {
2420 .ctl_name = NET_IPV6_ROUTE_GC_THRESH, 2510 .ctl_name = NET_IPV6_ROUTE_GC_THRESH,
2421 .procname = "gc_thresh", 2511 .procname = "gc_thresh",
2422 .data = &ip6_dst_ops.gc_thresh, 2512 .data = &ip6_dst_ops_template.gc_thresh,
2423 .maxlen = sizeof(int), 2513 .maxlen = sizeof(int),
2424 .mode = 0644, 2514 .mode = 0644,
2425 .proc_handler = &proc_dointvec, 2515 .proc_handler = &proc_dointvec,
@@ -2505,33 +2595,143 @@ struct ctl_table *ipv6_route_sysctl_init(struct net *net)
2505 table = kmemdup(ipv6_route_table_template, 2595 table = kmemdup(ipv6_route_table_template,
2506 sizeof(ipv6_route_table_template), 2596 sizeof(ipv6_route_table_template),
2507 GFP_KERNEL); 2597 GFP_KERNEL);
2598
2599 if (table) {
2600 table[0].data = &net->ipv6.sysctl.flush_delay;
2601 table[1].data = &net->ipv6.ip6_dst_ops->gc_thresh;
2602 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2603 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2604 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
2605 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
2606 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
2607 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2608 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2609 }
2610
2508 return table; 2611 return table;
2509} 2612}
2510#endif 2613#endif
2511 2614
2615static int ip6_route_net_init(struct net *net)
2616{
2617 int ret = 0;
2618
2619 ret = -ENOMEM;
2620 net->ipv6.ip6_dst_ops = kmemdup(&ip6_dst_ops_template,
2621 sizeof(*net->ipv6.ip6_dst_ops),
2622 GFP_KERNEL);
2623 if (!net->ipv6.ip6_dst_ops)
2624 goto out;
2625 net->ipv6.ip6_dst_ops->dst_net = hold_net(net);
2626
2627 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
2628 sizeof(*net->ipv6.ip6_null_entry),
2629 GFP_KERNEL);
2630 if (!net->ipv6.ip6_null_entry)
2631 goto out_ip6_dst_ops;
2632 net->ipv6.ip6_null_entry->u.dst.path =
2633 (struct dst_entry *)net->ipv6.ip6_null_entry;
2634 net->ipv6.ip6_null_entry->u.dst.ops = net->ipv6.ip6_dst_ops;
2635
2636#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2637 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
2638 sizeof(*net->ipv6.ip6_prohibit_entry),
2639 GFP_KERNEL);
2640 if (!net->ipv6.ip6_prohibit_entry) {
2641 kfree(net->ipv6.ip6_null_entry);
2642 goto out;
2643 }
2644 net->ipv6.ip6_prohibit_entry->u.dst.path =
2645 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
2646 net->ipv6.ip6_prohibit_entry->u.dst.ops = net->ipv6.ip6_dst_ops;
2647
2648 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
2649 sizeof(*net->ipv6.ip6_blk_hole_entry),
2650 GFP_KERNEL);
2651 if (!net->ipv6.ip6_blk_hole_entry) {
2652 kfree(net->ipv6.ip6_null_entry);
2653 kfree(net->ipv6.ip6_prohibit_entry);
2654 goto out;
2655 }
2656 net->ipv6.ip6_blk_hole_entry->u.dst.path =
2657 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
2658 net->ipv6.ip6_blk_hole_entry->u.dst.ops = net->ipv6.ip6_dst_ops;
2659#endif
2660
2661#ifdef CONFIG_PROC_FS
2662 proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
2663 proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2664#endif
2665 net->ipv6.ip6_rt_gc_expire = 30*HZ;
2666
2667 ret = 0;
2668out:
2669 return ret;
2670
2671out_ip6_dst_ops:
2672 release_net(net->ipv6.ip6_dst_ops->dst_net);
2673 kfree(net->ipv6.ip6_dst_ops);
2674 goto out;
2675}
2676
2677static void ip6_route_net_exit(struct net *net)
2678{
2679#ifdef CONFIG_PROC_FS
2680 proc_net_remove(net, "ipv6_route");
2681 proc_net_remove(net, "rt6_stats");
2682#endif
2683 kfree(net->ipv6.ip6_null_entry);
2684#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2685 kfree(net->ipv6.ip6_prohibit_entry);
2686 kfree(net->ipv6.ip6_blk_hole_entry);
2687#endif
2688 release_net(net->ipv6.ip6_dst_ops->dst_net);
2689 kfree(net->ipv6.ip6_dst_ops);
2690}
2691
2692static struct pernet_operations ip6_route_net_ops = {
2693 .init = ip6_route_net_init,
2694 .exit = ip6_route_net_exit,
2695};
2696
2697static struct notifier_block ip6_route_dev_notifier = {
2698 .notifier_call = ip6_route_dev_notify,
2699 .priority = 0,
2700};
2701
2512int __init ip6_route_init(void) 2702int __init ip6_route_init(void)
2513{ 2703{
2514 int ret; 2704 int ret;
2515 2705
2516 ip6_dst_ops.kmem_cachep = 2706 ret = -ENOMEM;
2707 ip6_dst_ops_template.kmem_cachep =
2517 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0, 2708 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
2518 SLAB_HWCACHE_ALIGN, NULL); 2709 SLAB_HWCACHE_ALIGN, NULL);
2519 if (!ip6_dst_ops.kmem_cachep) 2710 if (!ip6_dst_ops_template.kmem_cachep)
2520 return -ENOMEM; 2711 goto out;;
2521
2522 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops.kmem_cachep;
2523 2712
2524 ret = fib6_init(); 2713 ret = register_pernet_subsys(&ip6_route_net_ops);
2525 if (ret) 2714 if (ret)
2526 goto out_kmem_cache; 2715 goto out_kmem_cache;
2527 2716
2528 ret = ipv6_route_proc_init(&init_net); 2717 /* Registering of the loopback is done before this portion of code,
2718 * the loopback reference in rt6_info will not be taken, do it
2719 * manually for init_net */
2720 init_net.ipv6.ip6_null_entry->u.dst.dev = init_net.loopback_dev;
2721 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2722 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2723 init_net.ipv6.ip6_prohibit_entry->u.dst.dev = init_net.loopback_dev;
2724 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2725 init_net.ipv6.ip6_blk_hole_entry->u.dst.dev = init_net.loopback_dev;
2726 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2727 #endif
2728 ret = fib6_init();
2529 if (ret) 2729 if (ret)
2530 goto out_fib6_init; 2730 goto out_register_subsys;
2531 2731
2532 ret = xfrm6_init(); 2732 ret = xfrm6_init();
2533 if (ret) 2733 if (ret)
2534 goto out_proc_init; 2734 goto out_fib6_init;
2535 2735
2536 ret = fib6_rules_init(); 2736 ret = fib6_rules_init();
2537 if (ret) 2737 if (ret)
@@ -2543,7 +2743,10 @@ int __init ip6_route_init(void)
2543 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL)) 2743 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL))
2544 goto fib6_rules_init; 2744 goto fib6_rules_init;
2545 2745
2546 ret = 0; 2746 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
2747 if (ret)
2748 goto fib6_rules_init;
2749
2547out: 2750out:
2548 return ret; 2751 return ret;
2549 2752
@@ -2551,22 +2754,21 @@ fib6_rules_init:
2551 fib6_rules_cleanup(); 2754 fib6_rules_cleanup();
2552xfrm6_init: 2755xfrm6_init:
2553 xfrm6_fini(); 2756 xfrm6_fini();
2554out_proc_init:
2555 ipv6_route_proc_fini(&init_net);
2556out_fib6_init: 2757out_fib6_init:
2557 rt6_ifdown(NULL);
2558 fib6_gc_cleanup(); 2758 fib6_gc_cleanup();
2759out_register_subsys:
2760 unregister_pernet_subsys(&ip6_route_net_ops);
2559out_kmem_cache: 2761out_kmem_cache:
2560 kmem_cache_destroy(ip6_dst_ops.kmem_cachep); 2762 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
2561 goto out; 2763 goto out;
2562} 2764}
2563 2765
2564void ip6_route_cleanup(void) 2766void ip6_route_cleanup(void)
2565{ 2767{
2768 unregister_netdevice_notifier(&ip6_route_dev_notifier);
2566 fib6_rules_cleanup(); 2769 fib6_rules_cleanup();
2567 ipv6_route_proc_fini(&init_net);
2568 xfrm6_fini(); 2770 xfrm6_fini();
2569 rt6_ifdown(NULL);
2570 fib6_gc_cleanup(); 2771 fib6_gc_cleanup();
2571 kmem_cache_destroy(ip6_dst_ops.kmem_cachep); 2772 unregister_pernet_subsys(&ip6_route_net_ops);
2773 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
2572} 2774}
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 1656c003b989..4b2f1033994e 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -16,7 +16,7 @@
16 * Changes: 16 * Changes:
17 * Roger Venning <r.venning@telstra.com>: 6to4 support 17 * Roger Venning <r.venning@telstra.com>: 6to4 support
18 * Nate Thompson <nate@thebog.net>: 6to4 support 18 * Nate Thompson <nate@thebog.net>: 6to4 support
19 * Fred L. Templin <fltemplin@acm.org>: isatap support 19 * Fred Templin <fred.l.templin@boeing.com>: isatap support
20 */ 20 */
21 21
22#include <linux/module.h> 22#include <linux/module.h>
@@ -52,6 +52,8 @@
52#include <net/inet_ecn.h> 52#include <net/inet_ecn.h>
53#include <net/xfrm.h> 53#include <net/xfrm.h>
54#include <net/dsfield.h> 54#include <net/dsfield.h>
55#include <net/net_namespace.h>
56#include <net/netns/generic.h>
55 57
56/* 58/*
57 This version of net/ipv6/sit.c is cloned of net/ipv4/ip_gre.c 59 This version of net/ipv6/sit.c is cloned of net/ipv4/ip_gre.c
@@ -66,41 +68,47 @@ static int ipip6_fb_tunnel_init(struct net_device *dev);
66static int ipip6_tunnel_init(struct net_device *dev); 68static int ipip6_tunnel_init(struct net_device *dev);
67static void ipip6_tunnel_setup(struct net_device *dev); 69static void ipip6_tunnel_setup(struct net_device *dev);
68 70
69static struct net_device *ipip6_fb_tunnel_dev; 71static int sit_net_id;
72struct sit_net {
73 struct ip_tunnel *tunnels_r_l[HASH_SIZE];
74 struct ip_tunnel *tunnels_r[HASH_SIZE];
75 struct ip_tunnel *tunnels_l[HASH_SIZE];
76 struct ip_tunnel *tunnels_wc[1];
77 struct ip_tunnel **tunnels[4];
70 78
71static struct ip_tunnel *tunnels_r_l[HASH_SIZE]; 79 struct net_device *fb_tunnel_dev;
72static struct ip_tunnel *tunnels_r[HASH_SIZE]; 80};
73static struct ip_tunnel *tunnels_l[HASH_SIZE];
74static struct ip_tunnel *tunnels_wc[1];
75static struct ip_tunnel **tunnels[4] = { tunnels_wc, tunnels_l, tunnels_r, tunnels_r_l };
76 81
77static DEFINE_RWLOCK(ipip6_lock); 82static DEFINE_RWLOCK(ipip6_lock);
78 83
79static struct ip_tunnel * ipip6_tunnel_lookup(__be32 remote, __be32 local) 84static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
85 __be32 remote, __be32 local)
80{ 86{
81 unsigned h0 = HASH(remote); 87 unsigned h0 = HASH(remote);
82 unsigned h1 = HASH(local); 88 unsigned h1 = HASH(local);
83 struct ip_tunnel *t; 89 struct ip_tunnel *t;
90 struct sit_net *sitn = net_generic(net, sit_net_id);
84 91
85 for (t = tunnels_r_l[h0^h1]; t; t = t->next) { 92 for (t = sitn->tunnels_r_l[h0^h1]; t; t = t->next) {
86 if (local == t->parms.iph.saddr && 93 if (local == t->parms.iph.saddr &&
87 remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) 94 remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
88 return t; 95 return t;
89 } 96 }
90 for (t = tunnels_r[h0]; t; t = t->next) { 97 for (t = sitn->tunnels_r[h0]; t; t = t->next) {
91 if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) 98 if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
92 return t; 99 return t;
93 } 100 }
94 for (t = tunnels_l[h1]; t; t = t->next) { 101 for (t = sitn->tunnels_l[h1]; t; t = t->next) {
95 if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP)) 102 if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
96 return t; 103 return t;
97 } 104 }
98 if ((t = tunnels_wc[0]) != NULL && (t->dev->flags&IFF_UP)) 105 if ((t = sitn->tunnels_wc[0]) != NULL && (t->dev->flags&IFF_UP))
99 return t; 106 return t;
100 return NULL; 107 return NULL;
101} 108}
102 109
103static struct ip_tunnel **__ipip6_bucket(struct ip_tunnel_parm *parms) 110static struct ip_tunnel **__ipip6_bucket(struct sit_net *sitn,
111 struct ip_tunnel_parm *parms)
104{ 112{
105 __be32 remote = parms->iph.daddr; 113 __be32 remote = parms->iph.daddr;
106 __be32 local = parms->iph.saddr; 114 __be32 local = parms->iph.saddr;
@@ -115,19 +123,20 @@ static struct ip_tunnel **__ipip6_bucket(struct ip_tunnel_parm *parms)
115 prio |= 1; 123 prio |= 1;
116 h ^= HASH(local); 124 h ^= HASH(local);
117 } 125 }
118 return &tunnels[prio][h]; 126 return &sitn->tunnels[prio][h];
119} 127}
120 128
121static inline struct ip_tunnel **ipip6_bucket(struct ip_tunnel *t) 129static inline struct ip_tunnel **ipip6_bucket(struct sit_net *sitn,
130 struct ip_tunnel *t)
122{ 131{
123 return __ipip6_bucket(&t->parms); 132 return __ipip6_bucket(sitn, &t->parms);
124} 133}
125 134
126static void ipip6_tunnel_unlink(struct ip_tunnel *t) 135static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
127{ 136{
128 struct ip_tunnel **tp; 137 struct ip_tunnel **tp;
129 138
130 for (tp = ipip6_bucket(t); *tp; tp = &(*tp)->next) { 139 for (tp = ipip6_bucket(sitn, t); *tp; tp = &(*tp)->next) {
131 if (t == *tp) { 140 if (t == *tp) {
132 write_lock_bh(&ipip6_lock); 141 write_lock_bh(&ipip6_lock);
133 *tp = t->next; 142 *tp = t->next;
@@ -137,9 +146,9 @@ static void ipip6_tunnel_unlink(struct ip_tunnel *t)
137 } 146 }
138} 147}
139 148
140static void ipip6_tunnel_link(struct ip_tunnel *t) 149static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
141{ 150{
142 struct ip_tunnel **tp = ipip6_bucket(t); 151 struct ip_tunnel **tp = ipip6_bucket(sitn, t);
143 152
144 t->next = *tp; 153 t->next = *tp;
145 write_lock_bh(&ipip6_lock); 154 write_lock_bh(&ipip6_lock);
@@ -147,15 +156,17 @@ static void ipip6_tunnel_link(struct ip_tunnel *t)
147 write_unlock_bh(&ipip6_lock); 156 write_unlock_bh(&ipip6_lock);
148} 157}
149 158
150static struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int create) 159static struct ip_tunnel * ipip6_tunnel_locate(struct net *net,
160 struct ip_tunnel_parm *parms, int create)
151{ 161{
152 __be32 remote = parms->iph.daddr; 162 __be32 remote = parms->iph.daddr;
153 __be32 local = parms->iph.saddr; 163 __be32 local = parms->iph.saddr;
154 struct ip_tunnel *t, **tp, *nt; 164 struct ip_tunnel *t, **tp, *nt;
155 struct net_device *dev; 165 struct net_device *dev;
156 char name[IFNAMSIZ]; 166 char name[IFNAMSIZ];
167 struct sit_net *sitn = net_generic(net, sit_net_id);
157 168
158 for (tp = __ipip6_bucket(parms); (t = *tp) != NULL; tp = &t->next) { 169 for (tp = __ipip6_bucket(sitn, parms); (t = *tp) != NULL; tp = &t->next) {
159 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) 170 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
160 return t; 171 return t;
161 } 172 }
@@ -171,6 +182,8 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int
171 if (dev == NULL) 182 if (dev == NULL)
172 return NULL; 183 return NULL;
173 184
185 dev_net_set(dev, net);
186
174 if (strchr(name, '%')) { 187 if (strchr(name, '%')) {
175 if (dev_alloc_name(dev, name) < 0) 188 if (dev_alloc_name(dev, name) < 0)
176 goto failed_free; 189 goto failed_free;
@@ -188,7 +201,7 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int
188 201
189 dev_hold(dev); 202 dev_hold(dev);
190 203
191 ipip6_tunnel_link(nt); 204 ipip6_tunnel_link(sitn, nt);
192 return nt; 205 return nt;
193 206
194failed_free: 207failed_free:
@@ -197,15 +210,192 @@ failed:
197 return NULL; 210 return NULL;
198} 211}
199 212
213static struct ip_tunnel_prl_entry *
214__ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr)
215{
216 struct ip_tunnel_prl_entry *p = (struct ip_tunnel_prl_entry *)NULL;
217
218 for (p = t->prl; p; p = p->next)
219 if (p->addr == addr)
220 break;
221 return p;
222
223}
224
225static int ipip6_tunnel_get_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
226{
227 struct ip_tunnel_prl *kp;
228 struct ip_tunnel_prl_entry *prl;
229 unsigned int cmax, c = 0, ca, len;
230 int ret = 0;
231
232 cmax = a->datalen / sizeof(*a);
233 if (cmax > 1 && a->addr != htonl(INADDR_ANY))
234 cmax = 1;
235
236 /* For simple GET or for root users,
237 * we try harder to allocate.
238 */
239 kp = (cmax <= 1 || capable(CAP_NET_ADMIN)) ?
240 kcalloc(cmax, sizeof(*kp), GFP_KERNEL) :
241 NULL;
242
243 read_lock(&ipip6_lock);
244
245 ca = t->prl_count < cmax ? t->prl_count : cmax;
246
247 if (!kp) {
248 /* We don't try hard to allocate much memory for
249 * non-root users.
250 * For root users, retry allocating enough memory for
251 * the answer.
252 */
253 kp = kcalloc(ca, sizeof(*kp), GFP_ATOMIC);
254 if (!kp) {
255 ret = -ENOMEM;
256 goto out;
257 }
258 }
259
260 c = 0;
261 for (prl = t->prl; prl; prl = prl->next) {
262 if (c > cmax)
263 break;
264 if (a->addr != htonl(INADDR_ANY) && prl->addr != a->addr)
265 continue;
266 kp[c].addr = prl->addr;
267 kp[c].flags = prl->flags;
268 c++;
269 if (a->addr != htonl(INADDR_ANY))
270 break;
271 }
272out:
273 read_unlock(&ipip6_lock);
274
275 len = sizeof(*kp) * c;
276 ret = len ? copy_to_user(a->data, kp, len) : 0;
277
278 kfree(kp);
279 if (ret)
280 return -EFAULT;
281
282 a->datalen = len;
283 return 0;
284}
285
286static int
287ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
288{
289 struct ip_tunnel_prl_entry *p;
290 int err = 0;
291
292 if (a->addr == htonl(INADDR_ANY))
293 return -EINVAL;
294
295 write_lock(&ipip6_lock);
296
297 for (p = t->prl; p; p = p->next) {
298 if (p->addr == a->addr) {
299 if (chg)
300 goto update;
301 err = -EEXIST;
302 goto out;
303 }
304 }
305
306 if (chg) {
307 err = -ENXIO;
308 goto out;
309 }
310
311 p = kzalloc(sizeof(struct ip_tunnel_prl_entry), GFP_KERNEL);
312 if (!p) {
313 err = -ENOBUFS;
314 goto out;
315 }
316
317 p->next = t->prl;
318 t->prl = p;
319 t->prl_count++;
320update:
321 p->addr = a->addr;
322 p->flags = a->flags;
323out:
324 write_unlock(&ipip6_lock);
325 return err;
326}
327
328static int
329ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
330{
331 struct ip_tunnel_prl_entry *x, **p;
332 int err = 0;
333
334 write_lock(&ipip6_lock);
335
336 if (a && a->addr != htonl(INADDR_ANY)) {
337 for (p = &t->prl; *p; p = &(*p)->next) {
338 if ((*p)->addr == a->addr) {
339 x = *p;
340 *p = x->next;
341 kfree(x);
342 t->prl_count--;
343 goto out;
344 }
345 }
346 err = -ENXIO;
347 } else {
348 while (t->prl) {
349 x = t->prl;
350 t->prl = t->prl->next;
351 kfree(x);
352 t->prl_count--;
353 }
354 }
355out:
356 write_unlock(&ipip6_lock);
357 return 0;
358}
359
360static int
361isatap_chksrc(struct sk_buff *skb, struct iphdr *iph, struct ip_tunnel *t)
362{
363 struct ip_tunnel_prl_entry *p;
364 int ok = 1;
365
366 read_lock(&ipip6_lock);
367 p = __ipip6_tunnel_locate_prl(t, iph->saddr);
368 if (p) {
369 if (p->flags & PRL_DEFAULT)
370 skb->ndisc_nodetype = NDISC_NODETYPE_DEFAULT;
371 else
372 skb->ndisc_nodetype = NDISC_NODETYPE_NODEFAULT;
373 } else {
374 struct in6_addr *addr6 = &ipv6_hdr(skb)->saddr;
375 if (ipv6_addr_is_isatap(addr6) &&
376 (addr6->s6_addr32[3] == iph->saddr) &&
377 ipv6_chk_prefix(addr6, t->dev))
378 skb->ndisc_nodetype = NDISC_NODETYPE_HOST;
379 else
380 ok = 0;
381 }
382 read_unlock(&ipip6_lock);
383 return ok;
384}
385
200static void ipip6_tunnel_uninit(struct net_device *dev) 386static void ipip6_tunnel_uninit(struct net_device *dev)
201{ 387{
202 if (dev == ipip6_fb_tunnel_dev) { 388 struct net *net = dev_net(dev);
389 struct sit_net *sitn = net_generic(net, sit_net_id);
390
391 if (dev == sitn->fb_tunnel_dev) {
203 write_lock_bh(&ipip6_lock); 392 write_lock_bh(&ipip6_lock);
204 tunnels_wc[0] = NULL; 393 sitn->tunnels_wc[0] = NULL;
205 write_unlock_bh(&ipip6_lock); 394 write_unlock_bh(&ipip6_lock);
206 dev_put(dev); 395 dev_put(dev);
207 } else { 396 } else {
208 ipip6_tunnel_unlink(netdev_priv(dev)); 397 ipip6_tunnel_unlink(sitn, netdev_priv(dev));
398 ipip6_tunnel_del_prl(netdev_priv(dev), NULL);
209 dev_put(dev); 399 dev_put(dev);
210 } 400 }
211} 401}
@@ -256,7 +446,7 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
256 err = -ENOENT; 446 err = -ENOENT;
257 447
258 read_lock(&ipip6_lock); 448 read_lock(&ipip6_lock);
259 t = ipip6_tunnel_lookup(iph->daddr, iph->saddr); 449 t = ipip6_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
260 if (t == NULL || t->parms.iph.daddr == 0) 450 if (t == NULL || t->parms.iph.daddr == 0)
261 goto out; 451 goto out;
262 452
@@ -339,11 +529,12 @@ out:
339 skb_reset_network_header(skb2); 529 skb_reset_network_header(skb2);
340 530
341 /* Try to guess incoming interface */ 531 /* Try to guess incoming interface */
342 rt6i = rt6_lookup(&iph6->saddr, NULL, NULL, 0); 532 rt6i = rt6_lookup(dev_net(skb->dev), &iph6->saddr, NULL, NULL, 0);
343 if (rt6i && rt6i->rt6i_dev) { 533 if (rt6i && rt6i->rt6i_dev) {
344 skb2->dev = rt6i->rt6i_dev; 534 skb2->dev = rt6i->rt6i_dev;
345 535
346 rt6i = rt6_lookup(&iph6->daddr, &iph6->saddr, NULL, 0); 536 rt6i = rt6_lookup(dev_net(skb->dev),
537 &iph6->daddr, &iph6->saddr, NULL, 0);
347 538
348 if (rt6i && rt6i->rt6i_dev && rt6i->rt6i_dev->type == ARPHRD_SIT) { 539 if (rt6i && rt6i->rt6i_dev && rt6i->rt6i_dev->type == ARPHRD_SIT) {
349 struct ip_tunnel *t = netdev_priv(rt6i->rt6i_dev); 540 struct ip_tunnel *t = netdev_priv(rt6i->rt6i_dev);
@@ -365,48 +556,6 @@ static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
365 IP6_ECN_set_ce(ipv6_hdr(skb)); 556 IP6_ECN_set_ce(ipv6_hdr(skb));
366} 557}
367 558
368/* ISATAP (RFC4214) - check source address */
369static int
370isatap_srcok(struct sk_buff *skb, struct iphdr *iph, struct net_device *dev)
371{
372 struct neighbour *neigh;
373 struct dst_entry *dst;
374 struct rt6_info *rt;
375 struct flowi fl;
376 struct in6_addr *addr6;
377 struct in6_addr rtr;
378 struct ipv6hdr *iph6;
379 int ok = 0;
380
381 /* from onlink default router */
382 ipv6_addr_set(&rtr, htonl(0xFE800000), 0, 0, 0);
383 ipv6_isatap_eui64(rtr.s6_addr + 8, iph->saddr);
384 if ((rt = rt6_get_dflt_router(&rtr, dev))) {
385 dst_release(&rt->u.dst);
386 return 1;
387 }
388
389 iph6 = ipv6_hdr(skb);
390 memset(&fl, 0, sizeof(fl));
391 fl.proto = iph6->nexthdr;
392 ipv6_addr_copy(&fl.fl6_dst, &iph6->saddr);
393 fl.oif = dev->ifindex;
394 security_skb_classify_flow(skb, &fl);
395
396 dst = ip6_route_output(NULL, &fl);
397 if (!dst->error && (dst->dev == dev) && (neigh = dst->neighbour)) {
398
399 addr6 = (struct in6_addr*)&neigh->primary_key;
400
401 /* from correct previous hop */
402 if (ipv6_addr_is_isatap(addr6) &&
403 (addr6->s6_addr32[3] == iph->saddr))
404 ok = 1;
405 }
406 dst_release(dst);
407 return ok;
408}
409
410static int ipip6_rcv(struct sk_buff *skb) 559static int ipip6_rcv(struct sk_buff *skb)
411{ 560{
412 struct iphdr *iph; 561 struct iphdr *iph;
@@ -418,7 +567,8 @@ static int ipip6_rcv(struct sk_buff *skb)
418 iph = ip_hdr(skb); 567 iph = ip_hdr(skb);
419 568
420 read_lock(&ipip6_lock); 569 read_lock(&ipip6_lock);
421 if ((tunnel = ipip6_tunnel_lookup(iph->saddr, iph->daddr)) != NULL) { 570 if ((tunnel = ipip6_tunnel_lookup(dev_net(skb->dev),
571 iph->saddr, iph->daddr)) != NULL) {
422 secpath_reset(skb); 572 secpath_reset(skb);
423 skb->mac_header = skb->network_header; 573 skb->mac_header = skb->network_header;
424 skb_reset_network_header(skb); 574 skb_reset_network_header(skb);
@@ -427,7 +577,7 @@ static int ipip6_rcv(struct sk_buff *skb)
427 skb->pkt_type = PACKET_HOST; 577 skb->pkt_type = PACKET_HOST;
428 578
429 if ((tunnel->dev->priv_flags & IFF_ISATAP) && 579 if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
430 !isatap_srcok(skb, iph, tunnel->dev)) { 580 !isatap_chksrc(skb, iph, tunnel)) {
431 tunnel->stat.rx_errors++; 581 tunnel->stat.rx_errors++;
432 read_unlock(&ipip6_lock); 582 read_unlock(&ipip6_lock);
433 kfree_skb(skb); 583 kfree_skb(skb);
@@ -554,7 +704,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
554 .tos = RT_TOS(tos) } }, 704 .tos = RT_TOS(tos) } },
555 .oif = tunnel->parms.link, 705 .oif = tunnel->parms.link,
556 .proto = IPPROTO_IPV6 }; 706 .proto = IPPROTO_IPV6 };
557 if (ip_route_output_key(&init_net, &rt, &fl)) { 707 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
558 tunnel->stat.tx_carrier_errors++; 708 tunnel->stat.tx_carrier_errors++;
559 goto tx_error_icmp; 709 goto tx_error_icmp;
560 } 710 }
@@ -683,7 +833,7 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
683 .oif = tunnel->parms.link, 833 .oif = tunnel->parms.link,
684 .proto = IPPROTO_IPV6 }; 834 .proto = IPPROTO_IPV6 };
685 struct rtable *rt; 835 struct rtable *rt;
686 if (!ip_route_output_key(&init_net, &rt, &fl)) { 836 if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
687 tdev = rt->u.dst.dev; 837 tdev = rt->u.dst.dev;
688 ip_rt_put(rt); 838 ip_rt_put(rt);
689 } 839 }
@@ -691,7 +841,7 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
691 } 841 }
692 842
693 if (!tdev && tunnel->parms.link) 843 if (!tdev && tunnel->parms.link)
694 tdev = __dev_get_by_index(&init_net, tunnel->parms.link); 844 tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
695 845
696 if (tdev) { 846 if (tdev) {
697 dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr); 847 dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
@@ -707,17 +857,20 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
707{ 857{
708 int err = 0; 858 int err = 0;
709 struct ip_tunnel_parm p; 859 struct ip_tunnel_parm p;
860 struct ip_tunnel_prl prl;
710 struct ip_tunnel *t; 861 struct ip_tunnel *t;
862 struct net *net = dev_net(dev);
863 struct sit_net *sitn = net_generic(net, sit_net_id);
711 864
712 switch (cmd) { 865 switch (cmd) {
713 case SIOCGETTUNNEL: 866 case SIOCGETTUNNEL:
714 t = NULL; 867 t = NULL;
715 if (dev == ipip6_fb_tunnel_dev) { 868 if (dev == sitn->fb_tunnel_dev) {
716 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) { 869 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
717 err = -EFAULT; 870 err = -EFAULT;
718 break; 871 break;
719 } 872 }
720 t = ipip6_tunnel_locate(&p, 0); 873 t = ipip6_tunnel_locate(net, &p, 0);
721 } 874 }
722 if (t == NULL) 875 if (t == NULL)
723 t = netdev_priv(dev); 876 t = netdev_priv(dev);
@@ -743,9 +896,9 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
743 if (p.iph.ttl) 896 if (p.iph.ttl)
744 p.iph.frag_off |= htons(IP_DF); 897 p.iph.frag_off |= htons(IP_DF);
745 898
746 t = ipip6_tunnel_locate(&p, cmd == SIOCADDTUNNEL); 899 t = ipip6_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
747 900
748 if (dev != ipip6_fb_tunnel_dev && cmd == SIOCCHGTUNNEL) { 901 if (dev != sitn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
749 if (t != NULL) { 902 if (t != NULL) {
750 if (t->dev != dev) { 903 if (t->dev != dev) {
751 err = -EEXIST; 904 err = -EEXIST;
@@ -758,12 +911,12 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
758 break; 911 break;
759 } 912 }
760 t = netdev_priv(dev); 913 t = netdev_priv(dev);
761 ipip6_tunnel_unlink(t); 914 ipip6_tunnel_unlink(sitn, t);
762 t->parms.iph.saddr = p.iph.saddr; 915 t->parms.iph.saddr = p.iph.saddr;
763 t->parms.iph.daddr = p.iph.daddr; 916 t->parms.iph.daddr = p.iph.daddr;
764 memcpy(dev->dev_addr, &p.iph.saddr, 4); 917 memcpy(dev->dev_addr, &p.iph.saddr, 4);
765 memcpy(dev->broadcast, &p.iph.daddr, 4); 918 memcpy(dev->broadcast, &p.iph.daddr, 4);
766 ipip6_tunnel_link(t); 919 ipip6_tunnel_link(sitn, t);
767 netdev_state_change(dev); 920 netdev_state_change(dev);
768 } 921 }
769 } 922 }
@@ -790,15 +943,15 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
790 if (!capable(CAP_NET_ADMIN)) 943 if (!capable(CAP_NET_ADMIN))
791 goto done; 944 goto done;
792 945
793 if (dev == ipip6_fb_tunnel_dev) { 946 if (dev == sitn->fb_tunnel_dev) {
794 err = -EFAULT; 947 err = -EFAULT;
795 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) 948 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
796 goto done; 949 goto done;
797 err = -ENOENT; 950 err = -ENOENT;
798 if ((t = ipip6_tunnel_locate(&p, 0)) == NULL) 951 if ((t = ipip6_tunnel_locate(net, &p, 0)) == NULL)
799 goto done; 952 goto done;
800 err = -EPERM; 953 err = -EPERM;
801 if (t == netdev_priv(ipip6_fb_tunnel_dev)) 954 if (t == netdev_priv(sitn->fb_tunnel_dev))
802 goto done; 955 goto done;
803 dev = t->dev; 956 dev = t->dev;
804 } 957 }
@@ -806,6 +959,42 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
806 err = 0; 959 err = 0;
807 break; 960 break;
808 961
962 case SIOCGETPRL:
963 case SIOCADDPRL:
964 case SIOCDELPRL:
965 case SIOCCHGPRL:
966 err = -EPERM;
967 if (cmd != SIOCGETPRL && !capable(CAP_NET_ADMIN))
968 goto done;
969 err = -EINVAL;
970 if (dev == sitn->fb_tunnel_dev)
971 goto done;
972 err = -EFAULT;
973 if (copy_from_user(&prl, ifr->ifr_ifru.ifru_data, sizeof(prl)))
974 goto done;
975 err = -ENOENT;
976 if (!(t = netdev_priv(dev)))
977 goto done;
978
979 switch (cmd) {
980 case SIOCGETPRL:
981 err = ipip6_tunnel_get_prl(t, &prl);
982 if (!err && copy_to_user(ifr->ifr_ifru.ifru_data,
983 &prl, sizeof(prl)))
984 err = -EFAULT;
985 break;
986 case SIOCDELPRL:
987 err = ipip6_tunnel_del_prl(t, &prl);
988 break;
989 case SIOCADDPRL:
990 case SIOCCHGPRL:
991 err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL);
992 break;
993 }
994 if (cmd != SIOCGETPRL)
995 netdev_state_change(dev);
996 break;
997
809 default: 998 default:
810 err = -EINVAL; 999 err = -EINVAL;
811 } 1000 }
@@ -842,6 +1031,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
842 dev->flags = IFF_NOARP; 1031 dev->flags = IFF_NOARP;
843 dev->iflink = 0; 1032 dev->iflink = 0;
844 dev->addr_len = 4; 1033 dev->addr_len = 4;
1034 dev->features |= NETIF_F_NETNS_LOCAL;
845} 1035}
846 1036
847static int ipip6_tunnel_init(struct net_device *dev) 1037static int ipip6_tunnel_init(struct net_device *dev)
@@ -861,10 +1051,12 @@ static int ipip6_tunnel_init(struct net_device *dev)
861 return 0; 1051 return 0;
862} 1052}
863 1053
864static int __init ipip6_fb_tunnel_init(struct net_device *dev) 1054static int ipip6_fb_tunnel_init(struct net_device *dev)
865{ 1055{
866 struct ip_tunnel *tunnel = netdev_priv(dev); 1056 struct ip_tunnel *tunnel = netdev_priv(dev);
867 struct iphdr *iph = &tunnel->parms.iph; 1057 struct iphdr *iph = &tunnel->parms.iph;
1058 struct net *net = dev_net(dev);
1059 struct sit_net *sitn = net_generic(net, sit_net_id);
868 1060
869 tunnel->dev = dev; 1061 tunnel->dev = dev;
870 strcpy(tunnel->parms.name, dev->name); 1062 strcpy(tunnel->parms.name, dev->name);
@@ -875,7 +1067,7 @@ static int __init ipip6_fb_tunnel_init(struct net_device *dev)
875 iph->ttl = 64; 1067 iph->ttl = 64;
876 1068
877 dev_hold(dev); 1069 dev_hold(dev);
878 tunnels_wc[0] = tunnel; 1070 sitn->tunnels_wc[0] = tunnel;
879 return 0; 1071 return 0;
880} 1072}
881 1073
@@ -885,7 +1077,7 @@ static struct xfrm_tunnel sit_handler = {
885 .priority = 1, 1077 .priority = 1,
886}; 1078};
887 1079
888static void __exit sit_destroy_tunnels(void) 1080static void sit_destroy_tunnels(struct sit_net *sitn)
889{ 1081{
890 int prio; 1082 int prio;
891 1083
@@ -893,20 +1085,78 @@ static void __exit sit_destroy_tunnels(void)
893 int h; 1085 int h;
894 for (h = 0; h < HASH_SIZE; h++) { 1086 for (h = 0; h < HASH_SIZE; h++) {
895 struct ip_tunnel *t; 1087 struct ip_tunnel *t;
896 while ((t = tunnels[prio][h]) != NULL) 1088 while ((t = sitn->tunnels[prio][h]) != NULL)
897 unregister_netdevice(t->dev); 1089 unregister_netdevice(t->dev);
898 } 1090 }
899 } 1091 }
900} 1092}
901 1093
902static void __exit sit_cleanup(void) 1094static int sit_init_net(struct net *net)
903{ 1095{
904 xfrm4_tunnel_deregister(&sit_handler, AF_INET6); 1096 int err;
1097 struct sit_net *sitn;
1098
1099 err = -ENOMEM;
1100 sitn = kzalloc(sizeof(struct sit_net), GFP_KERNEL);
1101 if (sitn == NULL)
1102 goto err_alloc;
1103
1104 err = net_assign_generic(net, sit_net_id, sitn);
1105 if (err < 0)
1106 goto err_assign;
1107
1108 sitn->tunnels[0] = sitn->tunnels_wc;
1109 sitn->tunnels[1] = sitn->tunnels_l;
1110 sitn->tunnels[2] = sitn->tunnels_r;
1111 sitn->tunnels[3] = sitn->tunnels_r_l;
1112
1113 sitn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "sit0",
1114 ipip6_tunnel_setup);
1115 if (!sitn->fb_tunnel_dev) {
1116 err = -ENOMEM;
1117 goto err_alloc_dev;
1118 }
1119
1120 sitn->fb_tunnel_dev->init = ipip6_fb_tunnel_init;
1121 dev_net_set(sitn->fb_tunnel_dev, net);
1122
1123 if ((err = register_netdev(sitn->fb_tunnel_dev)))
1124 goto err_reg_dev;
905 1125
1126 return 0;
1127
1128err_reg_dev:
1129 free_netdev(sitn->fb_tunnel_dev);
1130err_alloc_dev:
1131 /* nothing */
1132err_assign:
1133 kfree(sitn);
1134err_alloc:
1135 return err;
1136}
1137
1138static void sit_exit_net(struct net *net)
1139{
1140 struct sit_net *sitn;
1141
1142 sitn = net_generic(net, sit_net_id);
906 rtnl_lock(); 1143 rtnl_lock();
907 sit_destroy_tunnels(); 1144 sit_destroy_tunnels(sitn);
908 unregister_netdevice(ipip6_fb_tunnel_dev); 1145 unregister_netdevice(sitn->fb_tunnel_dev);
909 rtnl_unlock(); 1146 rtnl_unlock();
1147 kfree(sitn);
1148}
1149
1150static struct pernet_operations sit_net_ops = {
1151 .init = sit_init_net,
1152 .exit = sit_exit_net,
1153};
1154
1155static void __exit sit_cleanup(void)
1156{
1157 xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
1158
1159 unregister_pernet_gen_device(sit_net_id, &sit_net_ops);
910} 1160}
911 1161
912static int __init sit_init(void) 1162static int __init sit_init(void)
@@ -920,25 +1170,11 @@ static int __init sit_init(void)
920 return -EAGAIN; 1170 return -EAGAIN;
921 } 1171 }
922 1172
923 ipip6_fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "sit0", 1173 err = register_pernet_gen_device(&sit_net_id, &sit_net_ops);
924 ipip6_tunnel_setup); 1174 if (err < 0)
925 if (!ipip6_fb_tunnel_dev) { 1175 xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
926 err = -ENOMEM;
927 goto err1;
928 }
929
930 ipip6_fb_tunnel_dev->init = ipip6_fb_tunnel_init;
931 1176
932 if ((err = register_netdev(ipip6_fb_tunnel_dev)))
933 goto err2;
934
935 out:
936 return err; 1177 return err;
937 err2:
938 free_netdev(ipip6_fb_tunnel_dev);
939 err1:
940 xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
941 goto out;
942} 1178}
943 1179
944module_init(sit_init); 1180module_init(sit_init);
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
new file mode 100644
index 000000000000..938ce4ecde55
--- /dev/null
+++ b/net/ipv6/syncookies.c
@@ -0,0 +1,279 @@
1/*
2 * IPv6 Syncookies implementation for the Linux kernel
3 *
4 * Authors:
5 * Glenn Griffin <ggriffin.kernel@gmail.com>
6 *
7 * Based on IPv4 implementation by Andi Kleen
8 * linux/net/ipv4/syncookies.c
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 *
15 */
16
17#include <linux/tcp.h>
18#include <linux/random.h>
19#include <linux/cryptohash.h>
20#include <linux/kernel.h>
21#include <net/ipv6.h>
22#include <net/tcp.h>
23
24extern int sysctl_tcp_syncookies;
25extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
26
27#define COOKIEBITS 24 /* Upper bits store count */
28#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
29
30/*
31 * This table has to be sorted and terminated with (__u16)-1.
32 * XXX generate a better table.
33 * Unresolved Issues: HIPPI with a 64k MSS is not well supported.
34 *
35 * Taken directly from ipv4 implementation.
36 * Should this list be modified for ipv6 use or is it close enough?
37 * rfc 2460 8.3 suggests mss values 20 bytes less than ipv4 counterpart
38 */
39static __u16 const msstab[] = {
40 64 - 1,
41 256 - 1,
42 512 - 1,
43 536 - 1,
44 1024 - 1,
45 1440 - 1,
46 1460 - 1,
47 4312 - 1,
48 (__u16)-1
49};
50/* The number doesn't include the -1 terminator */
51#define NUM_MSS (ARRAY_SIZE(msstab) - 1)
52
53/*
54 * This (misnamed) value is the age of syncookie which is permitted.
55 * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
56 * sysctl_tcp_retries1. It's a rather complicated formula (exponential
57 * backoff) to compute at runtime so it's currently hardcoded here.
58 */
59#define COUNTER_TRIES 4
60
61static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
62 struct request_sock *req,
63 struct dst_entry *dst)
64{
65 struct inet_connection_sock *icsk = inet_csk(sk);
66 struct sock *child;
67
68 child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
69 if (child)
70 inet_csk_reqsk_queue_add(sk, req, child);
71 else
72 reqsk_free(req);
73
74 return child;
75}
76
77static DEFINE_PER_CPU(__u32, cookie_scratch)[16 + 5 + SHA_WORKSPACE_WORDS];
78
79static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr,
80 __be16 sport, __be16 dport, u32 count, int c)
81{
82 __u32 *tmp = __get_cpu_var(cookie_scratch);
83
84 /*
85 * we have 320 bits of information to hash, copy in the remaining
86 * 192 bits required for sha_transform, from the syncookie_secret
87 * and overwrite the digest with the secret
88 */
89 memcpy(tmp + 10, syncookie_secret[c], 44);
90 memcpy(tmp, saddr, 16);
91 memcpy(tmp + 4, daddr, 16);
92 tmp[8] = ((__force u32)sport << 16) + (__force u32)dport;
93 tmp[9] = count;
94 sha_transform(tmp + 16, (__u8 *)tmp, tmp + 16 + 5);
95
96 return tmp[17];
97}
98
99static __u32 secure_tcp_syn_cookie(struct in6_addr *saddr, struct in6_addr *daddr,
100 __be16 sport, __be16 dport, __u32 sseq,
101 __u32 count, __u32 data)
102{
103 return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
104 sseq + (count << COOKIEBITS) +
105 ((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
106 & COOKIEMASK));
107}
108
109static __u32 check_tcp_syn_cookie(__u32 cookie, struct in6_addr *saddr,
110 struct in6_addr *daddr, __be16 sport,
111 __be16 dport, __u32 sseq, __u32 count,
112 __u32 maxdiff)
113{
114 __u32 diff;
115
116 cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
117
118 diff = (count - (cookie >> COOKIEBITS)) & ((__u32) -1 >> COOKIEBITS);
119 if (diff >= maxdiff)
120 return (__u32)-1;
121
122 return (cookie -
123 cookie_hash(saddr, daddr, sport, dport, count - diff, 1))
124 & COOKIEMASK;
125}
126
127__u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
128{
129 struct ipv6hdr *iph = ipv6_hdr(skb);
130 const struct tcphdr *th = tcp_hdr(skb);
131 int mssind;
132 const __u16 mss = *mssp;
133
134 tcp_sk(sk)->last_synq_overflow = jiffies;
135
136 for (mssind = 0; mss > msstab[mssind + 1]; mssind++)
137 ;
138 *mssp = msstab[mssind] + 1;
139
140 NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT);
141
142 return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source,
143 th->dest, ntohl(th->seq),
144 jiffies / (HZ * 60), mssind);
145}
146
147static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
148{
149 struct ipv6hdr *iph = ipv6_hdr(skb);
150 const struct tcphdr *th = tcp_hdr(skb);
151 __u32 seq = ntohl(th->seq) - 1;
152 __u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
153 th->source, th->dest, seq,
154 jiffies / (HZ * 60), COUNTER_TRIES);
155
156 return mssind < NUM_MSS ? msstab[mssind] + 1 : 0;
157}
158
159struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
160{
161 struct inet_request_sock *ireq;
162 struct inet6_request_sock *ireq6;
163 struct tcp_request_sock *treq;
164 struct ipv6_pinfo *np = inet6_sk(sk);
165 struct tcp_sock *tp = tcp_sk(sk);
166 const struct tcphdr *th = tcp_hdr(skb);
167 __u32 cookie = ntohl(th->ack_seq) - 1;
168 struct sock *ret = sk;
169 struct request_sock *req;
170 int mss;
171 struct dst_entry *dst;
172 __u8 rcv_wscale;
173 struct tcp_options_received tcp_opt;
174
175 if (!sysctl_tcp_syncookies || !th->ack)
176 goto out;
177
178 if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) ||
179 (mss = cookie_check(skb, cookie)) == 0) {
180 NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESFAILED);
181 goto out;
182 }
183
184 NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV);
185
186 /* check for timestamp cookie support */
187 memset(&tcp_opt, 0, sizeof(tcp_opt));
188 tcp_parse_options(skb, &tcp_opt, 0);
189
190 if (tcp_opt.saw_tstamp)
191 cookie_check_timestamp(&tcp_opt);
192
193 ret = NULL;
194 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
195 if (!req)
196 goto out;
197
198 ireq = inet_rsk(req);
199 ireq6 = inet6_rsk(req);
200 treq = tcp_rsk(req);
201 ireq6->pktopts = NULL;
202
203 if (security_inet_conn_request(sk, skb, req)) {
204 reqsk_free(req);
205 goto out;
206 }
207
208 req->mss = mss;
209 ireq->rmt_port = th->source;
210 ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
211 ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
212 if (ipv6_opt_accepted(sk, skb) ||
213 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
214 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
215 atomic_inc(&skb->users);
216 ireq6->pktopts = skb;
217 }
218
219 ireq6->iif = sk->sk_bound_dev_if;
220 /* So that link locals have meaning */
221 if (!sk->sk_bound_dev_if &&
222 ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
223 ireq6->iif = inet6_iif(skb);
224
225 req->expires = 0UL;
226 req->retrans = 0;
227 ireq->snd_wscale = tcp_opt.snd_wscale;
228 ireq->rcv_wscale = tcp_opt.rcv_wscale;
229 ireq->sack_ok = tcp_opt.sack_ok;
230 ireq->wscale_ok = tcp_opt.wscale_ok;
231 ireq->tstamp_ok = tcp_opt.saw_tstamp;
232 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
233 treq->rcv_isn = ntohl(th->seq) - 1;
234 treq->snt_isn = cookie;
235
236 /*
237 * We need to lookup the dst_entry to get the correct window size.
238 * This is taken from tcp_v6_syn_recv_sock. Somebody please enlighten
239 * me if there is a preferred way.
240 */
241 {
242 struct in6_addr *final_p = NULL, final;
243 struct flowi fl;
244 memset(&fl, 0, sizeof(fl));
245 fl.proto = IPPROTO_TCP;
246 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
247 if (np->opt && np->opt->srcrt) {
248 struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
249 ipv6_addr_copy(&final, &fl.fl6_dst);
250 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
251 final_p = &final;
252 }
253 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
254 fl.oif = sk->sk_bound_dev_if;
255 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
256 fl.fl_ip_sport = inet_sk(sk)->sport;
257 security_req_classify_flow(req, &fl);
258 if (ip6_dst_lookup(sk, &dst, &fl)) {
259 reqsk_free(req);
260 goto out;
261 }
262 if (final_p)
263 ipv6_addr_copy(&fl.fl6_dst, final_p);
264 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
265 goto out;
266 }
267
268 req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
269 tcp_select_initial_window(tcp_full_space(sk), req->mss,
270 &req->rcv_wnd, &req->window_clamp,
271 ireq->wscale_ok, &rcv_wscale);
272
273 ireq->rcv_wscale = rcv_wscale;
274
275 ret = get_cookie_sock(sk, skb, req, dst);
276
277out: return ret;
278}
279
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index d6d3e68086f8..3804dcbbfab0 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -71,24 +71,11 @@ static int ipv6_sysctl_net_init(struct net *net)
71 ipv6_route_table = ipv6_route_sysctl_init(net); 71 ipv6_route_table = ipv6_route_sysctl_init(net);
72 if (!ipv6_route_table) 72 if (!ipv6_route_table)
73 goto out_ipv6_table; 73 goto out_ipv6_table;
74 ipv6_table[0].child = ipv6_route_table;
74 75
75 ipv6_icmp_table = ipv6_icmp_sysctl_init(net); 76 ipv6_icmp_table = ipv6_icmp_sysctl_init(net);
76 if (!ipv6_icmp_table) 77 if (!ipv6_icmp_table)
77 goto out_ipv6_route_table; 78 goto out_ipv6_route_table;
78
79 ipv6_route_table[0].data = &net->ipv6.sysctl.flush_delay;
80 /* ipv6_route_table[1].data will be handled when we have
81 routes per namespace */
82 ipv6_route_table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
83 ipv6_route_table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
84 ipv6_route_table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
85 ipv6_route_table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
86 ipv6_route_table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
87 ipv6_route_table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
88 ipv6_route_table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
89 ipv6_table[0].child = ipv6_route_table;
90
91 ipv6_icmp_table[0].data = &net->ipv6.sysctl.icmpv6_time;
92 ipv6_table[1].child = ipv6_icmp_table; 79 ipv6_table[1].child = ipv6_icmp_table;
93 80
94 ipv6_table[2].data = &net->ipv6.sysctl.bindv6only; 81 ipv6_table[2].data = &net->ipv6.sysctl.bindv6only;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 12750f2b05ab..715965f0fac0 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -60,6 +60,7 @@
60#include <net/dsfield.h> 60#include <net/dsfield.h>
61#include <net/timewait_sock.h> 61#include <net/timewait_sock.h>
62#include <net/netdma.h> 62#include <net/netdma.h>
63#include <net/inet_common.h>
63 64
64#include <asm/uaccess.h> 65#include <asm/uaccess.h>
65 66
@@ -69,9 +70,6 @@
69#include <linux/crypto.h> 70#include <linux/crypto.h>
70#include <linux/scatterlist.h> 71#include <linux/scatterlist.h>
71 72
72/* Socket used for sending RSTs and ACKs */
73static struct socket *tcp6_socket;
74
75static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb); 73static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
76static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req); 74static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
77static void tcp_v6_send_check(struct sock *sk, int len, 75static void tcp_v6_send_check(struct sock *sk, int len,
@@ -324,7 +322,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
324 struct tcp_sock *tp; 322 struct tcp_sock *tp;
325 __u32 seq; 323 __u32 seq;
326 324
327 sk = inet6_lookup(skb->dev->nd_net, &tcp_hashinfo, &hdr->daddr, 325 sk = inet6_lookup(dev_net(skb->dev), &tcp_hashinfo, &hdr->daddr,
328 th->dest, &hdr->saddr, th->source, skb->dev->ifindex); 326 th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
329 327
330 if (sk == NULL) { 328 if (sk == NULL) {
@@ -455,8 +453,7 @@ out:
455} 453}
456 454
457 455
458static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, 456static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
459 struct dst_entry *dst)
460{ 457{
461 struct inet6_request_sock *treq = inet6_rsk(req); 458 struct inet6_request_sock *treq = inet6_rsk(req);
462 struct ipv6_pinfo *np = inet6_sk(sk); 459 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -464,6 +461,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
464 struct ipv6_txoptions *opt = NULL; 461 struct ipv6_txoptions *opt = NULL;
465 struct in6_addr * final_p = NULL, final; 462 struct in6_addr * final_p = NULL, final;
466 struct flowi fl; 463 struct flowi fl;
464 struct dst_entry *dst;
467 int err = -1; 465 int err = -1;
468 466
469 memset(&fl, 0, sizeof(fl)); 467 memset(&fl, 0, sizeof(fl));
@@ -476,24 +474,22 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
476 fl.fl_ip_sport = inet_sk(sk)->sport; 474 fl.fl_ip_sport = inet_sk(sk)->sport;
477 security_req_classify_flow(req, &fl); 475 security_req_classify_flow(req, &fl);
478 476
479 if (dst == NULL) { 477 opt = np->opt;
480 opt = np->opt; 478 if (opt && opt->srcrt) {
481 if (opt && opt->srcrt) { 479 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
482 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; 480 ipv6_addr_copy(&final, &fl.fl6_dst);
483 ipv6_addr_copy(&final, &fl.fl6_dst); 481 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
484 ipv6_addr_copy(&fl.fl6_dst, rt0->addr); 482 final_p = &final;
485 final_p = &final;
486 }
487
488 err = ip6_dst_lookup(sk, &dst, &fl);
489 if (err)
490 goto done;
491 if (final_p)
492 ipv6_addr_copy(&fl.fl6_dst, final_p);
493 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
494 goto done;
495 } 483 }
496 484
485 err = ip6_dst_lookup(sk, &dst, &fl);
486 if (err)
487 goto done;
488 if (final_p)
489 ipv6_addr_copy(&fl.fl6_dst, final_p);
490 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
491 goto done;
492
497 skb = tcp_make_synack(sk, dst, req); 493 skb = tcp_make_synack(sk, dst, req);
498 if (skb) { 494 if (skb) {
499 struct tcphdr *th = tcp_hdr(skb); 495 struct tcphdr *th = tcp_hdr(skb);
@@ -514,6 +510,20 @@ done:
514 return err; 510 return err;
515} 511}
516 512
513static inline void syn_flood_warning(struct sk_buff *skb)
514{
515#ifdef CONFIG_SYN_COOKIES
516 if (sysctl_tcp_syncookies)
517 printk(KERN_INFO
518 "TCPv6: Possible SYN flooding on port %d. "
519 "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
520 else
521#endif
522 printk(KERN_INFO
523 "TCPv6: Possible SYN flooding on port %d. "
524 "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
525}
526
517static void tcp_v6_reqsk_destructor(struct request_sock *req) 527static void tcp_v6_reqsk_destructor(struct request_sock *req)
518{ 528{
519 if (inet6_rsk(req)->pktopts) 529 if (inet6_rsk(req)->pktopts)
@@ -533,7 +543,7 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
533 return NULL; 543 return NULL;
534 544
535 for (i = 0; i < tp->md5sig_info->entries6; i++) { 545 for (i = 0; i < tp->md5sig_info->entries6; i++) {
536 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0) 546 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
537 return &tp->md5sig_info->keys6[i].base; 547 return &tp->md5sig_info->keys6[i].base;
538 } 548 }
539 return NULL; 549 return NULL;
@@ -622,7 +632,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
622 int i; 632 int i;
623 633
624 for (i = 0; i < tp->md5sig_info->entries6; i++) { 634 for (i = 0; i < tp->md5sig_info->entries6; i++) {
625 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) { 635 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
626 /* Free the key */ 636 /* Free the key */
627 kfree(tp->md5sig_info->keys6[i].base.key); 637 kfree(tp->md5sig_info->keys6[i].base.key);
628 tp->md5sig_info->entries6--; 638 tp->md5sig_info->entries6--;
@@ -741,7 +751,7 @@ static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
741 751
742 hp = tcp_get_md5sig_pool(); 752 hp = tcp_get_md5sig_pool();
743 if (!hp) { 753 if (!hp) {
744 printk(KERN_WARNING "%s(): hash pool not found...\n", __FUNCTION__); 754 printk(KERN_WARNING "%s(): hash pool not found...\n", __func__);
745 goto clear_hash_noput; 755 goto clear_hash_noput;
746 } 756 }
747 bp = &hp->md5_blk.ip6; 757 bp = &hp->md5_blk.ip6;
@@ -781,17 +791,17 @@ static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
781 /* Now store the hash into the packet */ 791 /* Now store the hash into the packet */
782 err = crypto_hash_init(desc); 792 err = crypto_hash_init(desc);
783 if (err) { 793 if (err) {
784 printk(KERN_WARNING "%s(): hash_init failed\n", __FUNCTION__); 794 printk(KERN_WARNING "%s(): hash_init failed\n", __func__);
785 goto clear_hash; 795 goto clear_hash;
786 } 796 }
787 err = crypto_hash_update(desc, sg, nbytes); 797 err = crypto_hash_update(desc, sg, nbytes);
788 if (err) { 798 if (err) {
789 printk(KERN_WARNING "%s(): hash_update failed\n", __FUNCTION__); 799 printk(KERN_WARNING "%s(): hash_update failed\n", __func__);
790 goto clear_hash; 800 goto clear_hash;
791 } 801 }
792 err = crypto_hash_final(desc, md5_hash); 802 err = crypto_hash_final(desc, md5_hash);
793 if (err) { 803 if (err) {
794 printk(KERN_WARNING "%s(): hash_final failed\n", __FUNCTION__); 804 printk(KERN_WARNING "%s(): hash_final failed\n", __func__);
795 goto clear_hash; 805 goto clear_hash;
796 } 806 }
797 807
@@ -917,7 +927,7 @@ done_opts:
917} 927}
918#endif 928#endif
919 929
920static struct request_sock_ops tcp6_request_sock_ops __read_mostly = { 930struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
921 .family = AF_INET6, 931 .family = AF_INET6,
922 .obj_size = sizeof(struct tcp6_request_sock), 932 .obj_size = sizeof(struct tcp6_request_sock),
923 .rtx_syn_ack = tcp_v6_send_synack, 933 .rtx_syn_ack = tcp_v6_send_synack,
@@ -979,6 +989,8 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
979 struct tcphdr *th = tcp_hdr(skb), *t1; 989 struct tcphdr *th = tcp_hdr(skb), *t1;
980 struct sk_buff *buff; 990 struct sk_buff *buff;
981 struct flowi fl; 991 struct flowi fl;
992 struct net *net = dev_net(skb->dst->dev);
993 struct sock *ctl_sk = net->ipv6.tcp_sk;
982 unsigned int tot_len = sizeof(*th); 994 unsigned int tot_len = sizeof(*th);
983#ifdef CONFIG_TCP_MD5SIG 995#ifdef CONFIG_TCP_MD5SIG
984 struct tcp_md5sig_key *key; 996 struct tcp_md5sig_key *key;
@@ -1059,11 +1071,14 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1059 fl.fl_ip_sport = t1->source; 1071 fl.fl_ip_sport = t1->source;
1060 security_skb_classify_flow(skb, &fl); 1072 security_skb_classify_flow(skb, &fl);
1061 1073
1062 /* sk = NULL, but it is safe for now. RST socket required. */ 1074 /* Pass a socket to ip6_dst_lookup either it is for RST
1063 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) { 1075 * Underlying function will use this to retrieve the network
1076 * namespace
1077 */
1078 if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
1064 1079
1065 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) { 1080 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1066 ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0); 1081 ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
1067 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); 1082 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1068 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS); 1083 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
1069 return; 1084 return;
@@ -1079,6 +1094,8 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1079 struct tcphdr *th = tcp_hdr(skb), *t1; 1094 struct tcphdr *th = tcp_hdr(skb), *t1;
1080 struct sk_buff *buff; 1095 struct sk_buff *buff;
1081 struct flowi fl; 1096 struct flowi fl;
1097 struct net *net = dev_net(skb->dev);
1098 struct sock *ctl_sk = net->ipv6.tcp_sk;
1082 unsigned int tot_len = sizeof(struct tcphdr); 1099 unsigned int tot_len = sizeof(struct tcphdr);
1083 __be32 *topt; 1100 __be32 *topt;
1084#ifdef CONFIG_TCP_MD5SIG 1101#ifdef CONFIG_TCP_MD5SIG
@@ -1160,9 +1177,9 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1160 fl.fl_ip_sport = t1->source; 1177 fl.fl_ip_sport = t1->source;
1161 security_skb_classify_flow(skb, &fl); 1178 security_skb_classify_flow(skb, &fl);
1162 1179
1163 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) { 1180 if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
1164 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) { 1181 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1165 ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0); 1182 ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
1166 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); 1183 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1167 return; 1184 return;
1168 } 1185 }
@@ -1202,7 +1219,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1202 if (req) 1219 if (req)
1203 return tcp_check_req(sk, skb, req, prev); 1220 return tcp_check_req(sk, skb, req, prev);
1204 1221
1205 nsk = __inet6_lookup_established(sk->sk_net, &tcp_hashinfo, 1222 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1206 &ipv6_hdr(skb)->saddr, th->source, 1223 &ipv6_hdr(skb)->saddr, th->source,
1207 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb)); 1224 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1208 1225
@@ -1215,9 +1232,9 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1215 return NULL; 1232 return NULL;
1216 } 1233 }
1217 1234
1218#if 0 /*def CONFIG_SYN_COOKIES*/ 1235#ifdef CONFIG_SYN_COOKIES
1219 if (!th->rst && !th->syn && th->ack) 1236 if (!th->rst && !th->syn && th->ack)
1220 sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt)); 1237 sk = cookie_v6_check(sk, skb);
1221#endif 1238#endif
1222 return sk; 1239 return sk;
1223} 1240}
@@ -1233,6 +1250,11 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1233 struct tcp_sock *tp = tcp_sk(sk); 1250 struct tcp_sock *tp = tcp_sk(sk);
1234 struct request_sock *req = NULL; 1251 struct request_sock *req = NULL;
1235 __u32 isn = TCP_SKB_CB(skb)->when; 1252 __u32 isn = TCP_SKB_CB(skb)->when;
1253#ifdef CONFIG_SYN_COOKIES
1254 int want_cookie = 0;
1255#else
1256#define want_cookie 0
1257#endif
1236 1258
1237 if (skb->protocol == htons(ETH_P_IP)) 1259 if (skb->protocol == htons(ETH_P_IP))
1238 return tcp_v4_conn_request(sk, skb); 1260 return tcp_v4_conn_request(sk, skb);
@@ -1240,12 +1262,14 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1240 if (!ipv6_unicast_destination(skb)) 1262 if (!ipv6_unicast_destination(skb))
1241 goto drop; 1263 goto drop;
1242 1264
1243 /*
1244 * There are no SYN attacks on IPv6, yet...
1245 */
1246 if (inet_csk_reqsk_queue_is_full(sk) && !isn) { 1265 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1247 if (net_ratelimit()) 1266 if (net_ratelimit())
1248 printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n"); 1267 syn_flood_warning(skb);
1268#ifdef CONFIG_SYN_COOKIES
1269 if (sysctl_tcp_syncookies)
1270 want_cookie = 1;
1271 else
1272#endif
1249 goto drop; 1273 goto drop;
1250 } 1274 }
1251 1275
@@ -1266,39 +1290,50 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1266 1290
1267 tcp_parse_options(skb, &tmp_opt, 0); 1291 tcp_parse_options(skb, &tmp_opt, 0);
1268 1292
1293 if (want_cookie && !tmp_opt.saw_tstamp)
1294 tcp_clear_options(&tmp_opt);
1295
1269 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; 1296 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1270 tcp_openreq_init(req, &tmp_opt, skb); 1297 tcp_openreq_init(req, &tmp_opt, skb);
1271 1298
1272 treq = inet6_rsk(req); 1299 treq = inet6_rsk(req);
1273 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr); 1300 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1274 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr); 1301 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
1275 TCP_ECN_create_request(req, tcp_hdr(skb));
1276 treq->pktopts = NULL; 1302 treq->pktopts = NULL;
1277 if (ipv6_opt_accepted(sk, skb) || 1303 if (!want_cookie)
1278 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || 1304 TCP_ECN_create_request(req, tcp_hdr(skb));
1279 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { 1305
1280 atomic_inc(&skb->users); 1306 if (want_cookie) {
1281 treq->pktopts = skb; 1307 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1282 } 1308 req->cookie_ts = tmp_opt.tstamp_ok;
1283 treq->iif = sk->sk_bound_dev_if; 1309 } else if (!isn) {
1310 if (ipv6_opt_accepted(sk, skb) ||
1311 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1312 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1313 atomic_inc(&skb->users);
1314 treq->pktopts = skb;
1315 }
1316 treq->iif = sk->sk_bound_dev_if;
1284 1317
1285 /* So that link locals have meaning */ 1318 /* So that link locals have meaning */
1286 if (!sk->sk_bound_dev_if && 1319 if (!sk->sk_bound_dev_if &&
1287 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL) 1320 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1288 treq->iif = inet6_iif(skb); 1321 treq->iif = inet6_iif(skb);
1289 1322
1290 if (isn == 0)
1291 isn = tcp_v6_init_sequence(skb); 1323 isn = tcp_v6_init_sequence(skb);
1324 }
1292 1325
1293 tcp_rsk(req)->snt_isn = isn; 1326 tcp_rsk(req)->snt_isn = isn;
1294 1327
1295 security_inet_conn_request(sk, skb, req); 1328 security_inet_conn_request(sk, skb, req);
1296 1329
1297 if (tcp_v6_send_synack(sk, req, NULL)) 1330 if (tcp_v6_send_synack(sk, req))
1298 goto drop; 1331 goto drop;
1299 1332
1300 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); 1333 if (!want_cookie) {
1301 return 0; 1334 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1335 return 0;
1336 }
1302 1337
1303drop: 1338drop:
1304 if (req) 1339 if (req)
@@ -1499,7 +1534,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1499#endif 1534#endif
1500 1535
1501 __inet6_hash(newsk); 1536 __inet6_hash(newsk);
1502 inet_inherit_port(sk, newsk); 1537 __inet_inherit_port(sk, newsk);
1503 1538
1504 return newsk; 1539 return newsk;
1505 1540
@@ -1704,7 +1739,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
1704 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb)); 1739 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
1705 TCP_SKB_CB(skb)->sacked = 0; 1740 TCP_SKB_CB(skb)->sacked = 0;
1706 1741
1707 sk = __inet6_lookup(skb->dev->nd_net, &tcp_hashinfo, 1742 sk = __inet6_lookup(dev_net(skb->dev), &tcp_hashinfo,
1708 &ipv6_hdr(skb)->saddr, th->source, 1743 &ipv6_hdr(skb)->saddr, th->source,
1709 &ipv6_hdr(skb)->daddr, ntohs(th->dest), 1744 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
1710 inet6_iif(skb)); 1745 inet6_iif(skb));
@@ -1787,7 +1822,7 @@ do_time_wait:
1787 { 1822 {
1788 struct sock *sk2; 1823 struct sock *sk2;
1789 1824
1790 sk2 = inet6_lookup_listener(skb->dev->nd_net, &tcp_hashinfo, 1825 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1791 &ipv6_hdr(skb)->daddr, 1826 &ipv6_hdr(skb)->daddr,
1792 ntohs(th->dest), inet6_iif(skb)); 1827 ntohs(th->dest), inet6_iif(skb));
1793 if (sk2 != NULL) { 1828 if (sk2 != NULL) {
@@ -2085,28 +2120,28 @@ out:
2085 return 0; 2120 return 0;
2086} 2121}
2087 2122
2088static struct file_operations tcp6_seq_fops;
2089static struct tcp_seq_afinfo tcp6_seq_afinfo = { 2123static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2090 .owner = THIS_MODULE,
2091 .name = "tcp6", 2124 .name = "tcp6",
2092 .family = AF_INET6, 2125 .family = AF_INET6,
2093 .seq_show = tcp6_seq_show, 2126 .seq_fops = {
2094 .seq_fops = &tcp6_seq_fops, 2127 .owner = THIS_MODULE,
2128 },
2129 .seq_ops = {
2130 .show = tcp6_seq_show,
2131 },
2095}; 2132};
2096 2133
2097int __init tcp6_proc_init(void) 2134int tcp6_proc_init(struct net *net)
2098{ 2135{
2099 return tcp_proc_register(&tcp6_seq_afinfo); 2136 return tcp_proc_register(net, &tcp6_seq_afinfo);
2100} 2137}
2101 2138
2102void tcp6_proc_exit(void) 2139void tcp6_proc_exit(struct net *net)
2103{ 2140{
2104 tcp_proc_unregister(&tcp6_seq_afinfo); 2141 tcp_proc_unregister(net, &tcp6_seq_afinfo);
2105} 2142}
2106#endif 2143#endif
2107 2144
2108DEFINE_PROTO_INUSE(tcpv6)
2109
2110struct proto tcpv6_prot = { 2145struct proto tcpv6_prot = {
2111 .name = "TCPv6", 2146 .name = "TCPv6",
2112 .owner = THIS_MODULE, 2147 .owner = THIS_MODULE,
@@ -2137,12 +2172,11 @@ struct proto tcpv6_prot = {
2137 .obj_size = sizeof(struct tcp6_sock), 2172 .obj_size = sizeof(struct tcp6_sock),
2138 .twsk_prot = &tcp6_timewait_sock_ops, 2173 .twsk_prot = &tcp6_timewait_sock_ops,
2139 .rsk_prot = &tcp6_request_sock_ops, 2174 .rsk_prot = &tcp6_request_sock_ops,
2140 .hashinfo = &tcp_hashinfo, 2175 .h.hashinfo = &tcp_hashinfo,
2141#ifdef CONFIG_COMPAT 2176#ifdef CONFIG_COMPAT
2142 .compat_setsockopt = compat_tcp_setsockopt, 2177 .compat_setsockopt = compat_tcp_setsockopt,
2143 .compat_getsockopt = compat_tcp_getsockopt, 2178 .compat_getsockopt = compat_tcp_getsockopt,
2144#endif 2179#endif
2145 REF_PROTO_INUSE(tcpv6)
2146}; 2180};
2147 2181
2148static struct inet6_protocol tcpv6_protocol = { 2182static struct inet6_protocol tcpv6_protocol = {
@@ -2164,6 +2198,22 @@ static struct inet_protosw tcpv6_protosw = {
2164 INET_PROTOSW_ICSK, 2198 INET_PROTOSW_ICSK,
2165}; 2199};
2166 2200
2201static int tcpv6_net_init(struct net *net)
2202{
2203 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2204 SOCK_RAW, IPPROTO_TCP, net);
2205}
2206
2207static void tcpv6_net_exit(struct net *net)
2208{
2209 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2210}
2211
2212static struct pernet_operations tcpv6_net_ops = {
2213 .init = tcpv6_net_init,
2214 .exit = tcpv6_net_exit,
2215};
2216
2167int __init tcpv6_init(void) 2217int __init tcpv6_init(void)
2168{ 2218{
2169 int ret; 2219 int ret;
@@ -2177,8 +2227,7 @@ int __init tcpv6_init(void)
2177 if (ret) 2227 if (ret)
2178 goto out_tcpv6_protocol; 2228 goto out_tcpv6_protocol;
2179 2229
2180 ret = inet_csk_ctl_sock_create(&tcp6_socket, PF_INET6, 2230 ret = register_pernet_subsys(&tcpv6_net_ops);
2181 SOCK_RAW, IPPROTO_TCP);
2182 if (ret) 2231 if (ret)
2183 goto out_tcpv6_protosw; 2232 goto out_tcpv6_protosw;
2184out: 2233out:
@@ -2193,7 +2242,7 @@ out_tcpv6_protosw:
2193 2242
2194void tcpv6_exit(void) 2243void tcpv6_exit(void)
2195{ 2244{
2196 sock_release(tcp6_socket); 2245 unregister_pernet_subsys(&tcpv6_net_ops);
2197 inet6_unregister_protosw(&tcpv6_protosw); 2246 inet6_unregister_protosw(&tcpv6_protosw);
2198 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP); 2247 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2199} 2248}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 53739de829db..1fd784f3e2ec 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -51,9 +51,9 @@
51#include <linux/seq_file.h> 51#include <linux/seq_file.h>
52#include "udp_impl.h" 52#include "udp_impl.h"
53 53
54static inline int udp_v6_get_port(struct sock *sk, unsigned short snum) 54int udp_v6_get_port(struct sock *sk, unsigned short snum)
55{ 55{
56 return udp_get_port(sk, snum, ipv6_rcv_saddr_equal); 56 return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal);
57} 57}
58 58
59static struct sock *__udp6_lib_lookup(struct net *net, 59static struct sock *__udp6_lib_lookup(struct net *net,
@@ -70,7 +70,7 @@ static struct sock *__udp6_lib_lookup(struct net *net,
70 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) { 70 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
71 struct inet_sock *inet = inet_sk(sk); 71 struct inet_sock *inet = inet_sk(sk);
72 72
73 if (sk->sk_net == net && sk->sk_hash == hnum && 73 if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
74 sk->sk_family == PF_INET6) { 74 sk->sk_family == PF_INET6) {
75 struct ipv6_pinfo *np = inet6_sk(sk); 75 struct ipv6_pinfo *np = inet6_sk(sk);
76 int score = 0; 76 int score = 0;
@@ -235,7 +235,7 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
235 struct sock *sk; 235 struct sock *sk;
236 int err; 236 int err;
237 237
238 sk = __udp6_lib_lookup(skb->dev->nd_net, daddr, uh->dest, 238 sk = __udp6_lib_lookup(dev_net(skb->dev), daddr, uh->dest,
239 saddr, uh->source, inet6_iif(skb), udptable); 239 saddr, uh->source, inet6_iif(skb), udptable);
240 if (sk == NULL) 240 if (sk == NULL)
241 return; 241 return;
@@ -323,6 +323,9 @@ static struct sock *udp_v6_mcast_next(struct sock *sk,
323 sk_for_each_from(s, node) { 323 sk_for_each_from(s, node) {
324 struct inet_sock *inet = inet_sk(s); 324 struct inet_sock *inet = inet_sk(s);
325 325
326 if (sock_net(s) != sock_net(sk))
327 continue;
328
326 if (s->sk_hash == num && s->sk_family == PF_INET6) { 329 if (s->sk_hash == num && s->sk_family == PF_INET6) {
327 struct ipv6_pinfo *np = inet6_sk(s); 330 struct ipv6_pinfo *np = inet6_sk(s);
328 if (inet->dport) { 331 if (inet->dport) {
@@ -480,7 +483,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
480 * check socket cache ... must talk to Alan about his plans 483 * check socket cache ... must talk to Alan about his plans
481 * for sock caches... i'll skip this for now. 484 * for sock caches... i'll skip this for now.
482 */ 485 */
483 sk = __udp6_lib_lookup(skb->dev->nd_net, saddr, uh->source, 486 sk = __udp6_lib_lookup(dev_net(skb->dev), saddr, uh->source,
484 daddr, uh->dest, inet6_iif(skb), udptable); 487 daddr, uh->dest, inet6_iif(skb), udptable);
485 488
486 if (sk == NULL) { 489 if (sk == NULL) {
@@ -749,7 +752,10 @@ do_udp_sendmsg:
749 opt = ipv6_fixup_options(&opt_space, opt); 752 opt = ipv6_fixup_options(&opt_space, opt);
750 753
751 fl.proto = sk->sk_protocol; 754 fl.proto = sk->sk_protocol;
752 ipv6_addr_copy(&fl.fl6_dst, daddr); 755 if (!ipv6_addr_any(daddr))
756 ipv6_addr_copy(&fl.fl6_dst, daddr);
757 else
758 fl.fl6_dst.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
753 if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr)) 759 if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
754 ipv6_addr_copy(&fl.fl6_src, &np->saddr); 760 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
755 fl.fl_ip_sport = inet->sport; 761 fl.fl_ip_sport = inet->sport;
@@ -789,9 +795,7 @@ do_udp_sendmsg:
789 else 795 else
790 hlimit = np->hop_limit; 796 hlimit = np->hop_limit;
791 if (hlimit < 0) 797 if (hlimit < 0)
792 hlimit = dst_metric(dst, RTAX_HOPLIMIT); 798 hlimit = ip6_dst_hoplimit(dst);
793 if (hlimit < 0)
794 hlimit = ipv6_get_hoplimit(dst->dev);
795 } 799 }
796 800
797 if (tclass < 0) { 801 if (tclass < 0) {
@@ -976,30 +980,30 @@ int udp6_seq_show(struct seq_file *seq, void *v)
976 return 0; 980 return 0;
977} 981}
978 982
979static struct file_operations udp6_seq_fops;
980static struct udp_seq_afinfo udp6_seq_afinfo = { 983static struct udp_seq_afinfo udp6_seq_afinfo = {
981 .owner = THIS_MODULE,
982 .name = "udp6", 984 .name = "udp6",
983 .family = AF_INET6, 985 .family = AF_INET6,
984 .hashtable = udp_hash, 986 .hashtable = udp_hash,
985 .seq_show = udp6_seq_show, 987 .seq_fops = {
986 .seq_fops = &udp6_seq_fops, 988 .owner = THIS_MODULE,
989 },
990 .seq_ops = {
991 .show = udp6_seq_show,
992 },
987}; 993};
988 994
989int __init udp6_proc_init(void) 995int udp6_proc_init(struct net *net)
990{ 996{
991 return udp_proc_register(&udp6_seq_afinfo); 997 return udp_proc_register(net, &udp6_seq_afinfo);
992} 998}
993 999
994void udp6_proc_exit(void) { 1000void udp6_proc_exit(struct net *net) {
995 udp_proc_unregister(&udp6_seq_afinfo); 1001 udp_proc_unregister(net, &udp6_seq_afinfo);
996} 1002}
997#endif /* CONFIG_PROC_FS */ 1003#endif /* CONFIG_PROC_FS */
998 1004
999/* ------------------------------------------------------------------------ */ 1005/* ------------------------------------------------------------------------ */
1000 1006
1001DEFINE_PROTO_INUSE(udpv6)
1002
1003struct proto udpv6_prot = { 1007struct proto udpv6_prot = {
1004 .name = "UDPv6", 1008 .name = "UDPv6",
1005 .owner = THIS_MODULE, 1009 .owner = THIS_MODULE,
@@ -1021,11 +1025,11 @@ struct proto udpv6_prot = {
1021 .sysctl_wmem = &sysctl_udp_wmem_min, 1025 .sysctl_wmem = &sysctl_udp_wmem_min,
1022 .sysctl_rmem = &sysctl_udp_rmem_min, 1026 .sysctl_rmem = &sysctl_udp_rmem_min,
1023 .obj_size = sizeof(struct udp6_sock), 1027 .obj_size = sizeof(struct udp6_sock),
1028 .h.udp_hash = udp_hash,
1024#ifdef CONFIG_COMPAT 1029#ifdef CONFIG_COMPAT
1025 .compat_setsockopt = compat_udpv6_setsockopt, 1030 .compat_setsockopt = compat_udpv6_setsockopt,
1026 .compat_getsockopt = compat_udpv6_getsockopt, 1031 .compat_getsockopt = compat_udpv6_getsockopt,
1027#endif 1032#endif
1028 REF_PROTO_INUSE(udpv6)
1029}; 1033};
1030 1034
1031static struct inet_protosw udpv6_protosw = { 1035static struct inet_protosw udpv6_protosw = {
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 21be3a83e7bc..321b81a4d418 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -11,6 +11,8 @@ extern int __udp6_lib_rcv(struct sk_buff *, struct hlist_head [], int );
11extern void __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, 11extern void __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *,
12 int , int , int , __be32 , struct hlist_head []); 12 int , int , int , __be32 , struct hlist_head []);
13 13
14extern int udp_v6_get_port(struct sock *sk, unsigned short snum);
15
14extern int udpv6_getsockopt(struct sock *sk, int level, int optname, 16extern int udpv6_getsockopt(struct sock *sk, int level, int optname,
15 char __user *optval, int __user *optlen); 17 char __user *optval, int __user *optlen);
16extern int udpv6_setsockopt(struct sock *sk, int level, int optname, 18extern int udpv6_setsockopt(struct sock *sk, int level, int optname,
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 87d4202522ee..491efd00a866 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -35,13 +35,6 @@ static struct inet6_protocol udplitev6_protocol = {
35 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 35 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
36}; 36};
37 37
38static int udplite_v6_get_port(struct sock *sk, unsigned short snum)
39{
40 return udplite_get_port(sk, snum, ipv6_rcv_saddr_equal);
41}
42
43DEFINE_PROTO_INUSE(udplitev6)
44
45struct proto udplitev6_prot = { 38struct proto udplitev6_prot = {
46 .name = "UDPLITEv6", 39 .name = "UDPLITEv6",
47 .owner = THIS_MODULE, 40 .owner = THIS_MODULE,
@@ -58,13 +51,13 @@ struct proto udplitev6_prot = {
58 .backlog_rcv = udpv6_queue_rcv_skb, 51 .backlog_rcv = udpv6_queue_rcv_skb,
59 .hash = udp_lib_hash, 52 .hash = udp_lib_hash,
60 .unhash = udp_lib_unhash, 53 .unhash = udp_lib_unhash,
61 .get_port = udplite_v6_get_port, 54 .get_port = udp_v6_get_port,
62 .obj_size = sizeof(struct udp6_sock), 55 .obj_size = sizeof(struct udp6_sock),
56 .h.udp_hash = udplite_hash,
63#ifdef CONFIG_COMPAT 57#ifdef CONFIG_COMPAT
64 .compat_setsockopt = compat_udpv6_setsockopt, 58 .compat_setsockopt = compat_udpv6_setsockopt,
65 .compat_getsockopt = compat_udpv6_getsockopt, 59 .compat_getsockopt = compat_udpv6_getsockopt,
66#endif 60#endif
67 REF_PROTO_INUSE(udplitev6)
68}; 61};
69 62
70static struct inet_protosw udplite6_protosw = { 63static struct inet_protosw udplite6_protosw = {
@@ -103,23 +96,40 @@ void udplitev6_exit(void)
103} 96}
104 97
105#ifdef CONFIG_PROC_FS 98#ifdef CONFIG_PROC_FS
106static struct file_operations udplite6_seq_fops;
107static struct udp_seq_afinfo udplite6_seq_afinfo = { 99static struct udp_seq_afinfo udplite6_seq_afinfo = {
108 .owner = THIS_MODULE,
109 .name = "udplite6", 100 .name = "udplite6",
110 .family = AF_INET6, 101 .family = AF_INET6,
111 .hashtable = udplite_hash, 102 .hashtable = udplite_hash,
112 .seq_show = udp6_seq_show, 103 .seq_fops = {
113 .seq_fops = &udplite6_seq_fops, 104 .owner = THIS_MODULE,
105 },
106 .seq_ops = {
107 .show = udp6_seq_show,
108 },
109};
110
111static int udplite6_proc_init_net(struct net *net)
112{
113 return udp_proc_register(net, &udplite6_seq_afinfo);
114}
115
116static void udplite6_proc_exit_net(struct net *net)
117{
118 udp_proc_unregister(net, &udplite6_seq_afinfo);
119}
120
121static struct pernet_operations udplite6_net_ops = {
122 .init = udplite6_proc_init_net,
123 .exit = udplite6_proc_exit_net,
114}; 124};
115 125
116int __init udplite6_proc_init(void) 126int __init udplite6_proc_init(void)
117{ 127{
118 return udp_proc_register(&udplite6_seq_afinfo); 128 return register_pernet_subsys(&udplite6_net_ops);
119} 129}
120 130
121void udplite6_proc_exit(void) 131void udplite6_proc_exit(void)
122{ 132{
123 udp_proc_unregister(&udplite6_seq_afinfo); 133 unregister_pernet_subsys(&udplite6_net_ops);
124} 134}
125#endif 135#endif
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index a4714d76ae6b..a71c7ddcb41e 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -59,9 +59,6 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
59 xfrm_address_t *saddr, u8 proto) 59 xfrm_address_t *saddr, u8 proto)
60{ 60{
61 struct xfrm_state *x = NULL; 61 struct xfrm_state *x = NULL;
62 int wildcard = 0;
63 xfrm_address_t *xany;
64 int nh = 0;
65 int i = 0; 62 int i = 0;
66 63
67 /* Allocate new secpath or COW existing one. */ 64 /* Allocate new secpath or COW existing one. */
@@ -83,10 +80,9 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
83 goto drop; 80 goto drop;
84 } 81 }
85 82
86 xany = (xfrm_address_t *)&in6addr_any;
87
88 for (i = 0; i < 3; i++) { 83 for (i = 0; i < 3; i++) {
89 xfrm_address_t *dst, *src; 84 xfrm_address_t *dst, *src;
85
90 switch (i) { 86 switch (i) {
91 case 0: 87 case 0:
92 dst = daddr; 88 dst = daddr;
@@ -94,16 +90,13 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
94 break; 90 break;
95 case 1: 91 case 1:
96 /* lookup state with wild-card source address */ 92 /* lookup state with wild-card source address */
97 wildcard = 1;
98 dst = daddr; 93 dst = daddr;
99 src = xany; 94 src = (xfrm_address_t *)&in6addr_any;
100 break; 95 break;
101 case 2:
102 default: 96 default:
103 /* lookup state with wild-card addresses */ 97 /* lookup state with wild-card addresses */
104 wildcard = 1; /* XXX */ 98 dst = (xfrm_address_t *)&in6addr_any;
105 dst = xany; 99 src = (xfrm_address_t *)&in6addr_any;
106 src = xany;
107 break; 100 break;
108 } 101 }
109 102
@@ -113,39 +106,19 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
113 106
114 spin_lock(&x->lock); 107 spin_lock(&x->lock);
115 108
116 if (wildcard) { 109 if ((!i || (x->props.flags & XFRM_STATE_WILDRECV)) &&
117 if ((x->props.flags & XFRM_STATE_WILDRECV) == 0) { 110 likely(x->km.state == XFRM_STATE_VALID) &&
118 spin_unlock(&x->lock); 111 !xfrm_state_check_expire(x)) {
119 xfrm_state_put(x);
120 x = NULL;
121 continue;
122 }
123 }
124
125 if (unlikely(x->km.state != XFRM_STATE_VALID)) {
126 spin_unlock(&x->lock); 112 spin_unlock(&x->lock);
127 xfrm_state_put(x); 113 if (x->type->input(x, skb) > 0) {
128 x = NULL; 114 /* found a valid state */
129 continue; 115 break;
130 } 116 }
131 if (xfrm_state_check_expire(x)) { 117 } else
132 spin_unlock(&x->lock); 118 spin_unlock(&x->lock);
133 xfrm_state_put(x);
134 x = NULL;
135 continue;
136 }
137
138 spin_unlock(&x->lock);
139
140 nh = x->type->input(x, skb);
141 if (nh <= 0) {
142 xfrm_state_put(x);
143 x = NULL;
144 continue;
145 }
146 119
147 /* Found a state */ 120 xfrm_state_put(x);
148 break; 121 x = NULL;
149 } 122 }
150 123
151 if (!x) { 124 if (!x) {
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 7d20199ee1f3..8f1e0543b3c4 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -38,7 +38,7 @@ static struct dst_entry *xfrm6_dst_lookup(int tos, xfrm_address_t *saddr,
38 if (saddr) 38 if (saddr)
39 memcpy(&fl.fl6_src, saddr, sizeof(fl.fl6_src)); 39 memcpy(&fl.fl6_src, saddr, sizeof(fl.fl6_src));
40 40
41 dst = ip6_route_output(NULL, &fl); 41 dst = ip6_route_output(&init_net, NULL, &fl);
42 42
43 err = dst->error; 43 err = dst->error;
44 if (dst->error) { 44 if (dst->error) {
@@ -57,8 +57,9 @@ static int xfrm6_get_saddr(xfrm_address_t *saddr, xfrm_address_t *daddr)
57 if (IS_ERR(dst)) 57 if (IS_ERR(dst))
58 return -EHOSTUNREACH; 58 return -EHOSTUNREACH;
59 59
60 ipv6_get_saddr(dst, (struct in6_addr *)&daddr->a6, 60 ipv6_dev_get_saddr(ip6_dst_idev(dst)->dev,
61 (struct in6_addr *)&saddr->a6); 61 (struct in6_addr *)&daddr->a6, 0,
62 (struct in6_addr *)&saddr->a6);
62 dst_release(dst); 63 dst_release(dst);
63 return 0; 64 return 0;
64} 65}
@@ -246,7 +247,7 @@ static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
246 xdst = (struct xfrm_dst *)dst; 247 xdst = (struct xfrm_dst *)dst;
247 if (xdst->u.rt6.rt6i_idev->dev == dev) { 248 if (xdst->u.rt6.rt6i_idev->dev == dev) {
248 struct inet6_dev *loopback_idev = 249 struct inet6_dev *loopback_idev =
249 in6_dev_get(dev->nd_net->loopback_dev); 250 in6_dev_get(dev_net(dev)->loopback_dev);
250 BUG_ON(!loopback_idev); 251 BUG_ON(!loopback_idev);
251 252
252 do { 253 do {
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index ff1e1db8e236..89884a4f23aa 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -49,125 +49,102 @@ __xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl,
49 x->props.family = AF_INET6; 49 x->props.family = AF_INET6;
50} 50}
51 51
52/* distribution counting sort function for xfrm_state and xfrm_tmpl */
52static int 53static int
53__xfrm6_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n) 54__xfrm6_sort(void **dst, void **src, int n, int (*cmp)(void *p), int maxclass)
54{ 55{
55 int i; 56 int i;
56 int j = 0; 57 int class[XFRM_MAX_DEPTH];
58 int count[maxclass];
57 59
58 /* Rule 1: select IPsec transport except AH */ 60 memset(count, 0, sizeof(count));
59 for (i = 0; i < n; i++) {
60 if (src[i]->props.mode == XFRM_MODE_TRANSPORT &&
61 src[i]->id.proto != IPPROTO_AH) {
62 dst[j++] = src[i];
63 src[i] = NULL;
64 }
65 }
66 if (j == n)
67 goto end;
68 61
69 /* Rule 2: select MIPv6 RO or inbound trigger */
70#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
71 for (i = 0; i < n; i++) { 62 for (i = 0; i < n; i++) {
72 if (src[i] && 63 int c;
73 (src[i]->props.mode == XFRM_MODE_ROUTEOPTIMIZATION || 64 class[i] = c = cmp(src[i]);
74 src[i]->props.mode == XFRM_MODE_IN_TRIGGER)) { 65 count[c]++;
75 dst[j++] = src[i];
76 src[i] = NULL;
77 }
78 } 66 }
79 if (j == n)
80 goto end;
81#endif
82 67
83 /* Rule 3: select IPsec transport AH */ 68 for (i = 2; i < maxclass; i++)
84 for (i = 0; i < n; i++) { 69 count[i] += count[i - 1];
85 if (src[i] &&
86 src[i]->props.mode == XFRM_MODE_TRANSPORT &&
87 src[i]->id.proto == IPPROTO_AH) {
88 dst[j++] = src[i];
89 src[i] = NULL;
90 }
91 }
92 if (j == n)
93 goto end;
94 70
95 /* Rule 4: select IPsec tunnel */
96 for (i = 0; i < n; i++) { 71 for (i = 0; i < n; i++) {
97 if (src[i] && 72 dst[count[class[i] - 1]++] = src[i];
98 (src[i]->props.mode == XFRM_MODE_TUNNEL || 73 src[i] = 0;
99 src[i]->props.mode == XFRM_MODE_BEET)) {
100 dst[j++] = src[i];
101 src[i] = NULL;
102 }
103 } 74 }
104 if (likely(j == n))
105 goto end;
106 75
107 /* Final rule */
108 for (i = 0; i < n; i++) {
109 if (src[i]) {
110 dst[j++] = src[i];
111 src[i] = NULL;
112 }
113 }
114
115 end:
116 return 0; 76 return 0;
117} 77}
118 78
119static int 79/*
120__xfrm6_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n) 80 * Rule for xfrm_state:
81 *
82 * rule 1: select IPsec transport except AH
83 * rule 2: select MIPv6 RO or inbound trigger
84 * rule 3: select IPsec transport AH
85 * rule 4: select IPsec tunnel
86 * rule 5: others
87 */
88static int __xfrm6_state_sort_cmp(void *p)
121{ 89{
122 int i; 90 struct xfrm_state *v = p;
123 int j = 0; 91
124 92 switch (v->props.mode) {
125 /* Rule 1: select IPsec transport */ 93 case XFRM_MODE_TRANSPORT:
126 for (i = 0; i < n; i++) { 94 if (v->id.proto != IPPROTO_AH)
127 if (src[i]->mode == XFRM_MODE_TRANSPORT) { 95 return 1;
128 dst[j++] = src[i]; 96 else
129 src[i] = NULL; 97 return 3;
130 }
131 }
132 if (j == n)
133 goto end;
134
135 /* Rule 2: select MIPv6 RO or inbound trigger */
136#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 98#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
137 for (i = 0; i < n; i++) { 99 case XFRM_MODE_ROUTEOPTIMIZATION:
138 if (src[i] && 100 case XFRM_MODE_IN_TRIGGER:
139 (src[i]->mode == XFRM_MODE_ROUTEOPTIMIZATION || 101 return 2;
140 src[i]->mode == XFRM_MODE_IN_TRIGGER)) {
141 dst[j++] = src[i];
142 src[i] = NULL;
143 }
144 }
145 if (j == n)
146 goto end;
147#endif 102#endif
148 103 case XFRM_MODE_TUNNEL:
149 /* Rule 3: select IPsec tunnel */ 104 case XFRM_MODE_BEET:
150 for (i = 0; i < n; i++) { 105 return 4;
151 if (src[i] &&
152 (src[i]->mode == XFRM_MODE_TUNNEL ||
153 src[i]->mode == XFRM_MODE_BEET)) {
154 dst[j++] = src[i];
155 src[i] = NULL;
156 }
157 } 106 }
158 if (likely(j == n)) 107 return 5;
159 goto end; 108}
160 109
161 /* Final rule */ 110static int
162 for (i = 0; i < n; i++) { 111__xfrm6_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n)
163 if (src[i]) { 112{
164 dst[j++] = src[i]; 113 return __xfrm6_sort((void **)dst, (void **)src, n,
165 src[i] = NULL; 114 __xfrm6_state_sort_cmp, 6);
166 } 115}
116
117/*
118 * Rule for xfrm_tmpl:
119 *
120 * rule 1: select IPsec transport
121 * rule 2: select MIPv6 RO or inbound trigger
122 * rule 3: select IPsec tunnel
123 * rule 4: others
124 */
125static int __xfrm6_tmpl_sort_cmp(void *p)
126{
127 struct xfrm_tmpl *v = p;
128 switch (v->mode) {
129 case XFRM_MODE_TRANSPORT:
130 return 1;
131#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
132 case XFRM_MODE_ROUTEOPTIMIZATION:
133 case XFRM_MODE_IN_TRIGGER:
134 return 2;
135#endif
136 case XFRM_MODE_TUNNEL:
137 case XFRM_MODE_BEET:
138 return 3;
167 } 139 }
140 return 4;
141}
168 142
169 end: 143static int
170 return 0; 144__xfrm6_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n)
145{
146 return __xfrm6_sort((void **)dst, (void **)src, n,
147 __xfrm6_tmpl_sort_cmp, 5);
171} 148}
172 149
173int xfrm6_extract_header(struct sk_buff *skb) 150int xfrm6_extract_header(struct sk_buff *skb)
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 639fe8a6ff1e..c2b278138604 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -140,12 +140,26 @@ __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
140 140
141EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup); 141EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);
142 142
143static int __xfrm6_tunnel_spi_check(u32 spi)
144{
145 struct xfrm6_tunnel_spi *x6spi;
146 int index = xfrm6_tunnel_spi_hash_byspi(spi);
147 struct hlist_node *pos;
148
149 hlist_for_each_entry(x6spi, pos,
150 &xfrm6_tunnel_spi_byspi[index],
151 list_byspi) {
152 if (x6spi->spi == spi)
153 return -1;
154 }
155 return index;
156}
157
143static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) 158static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
144{ 159{
145 u32 spi; 160 u32 spi;
146 struct xfrm6_tunnel_spi *x6spi; 161 struct xfrm6_tunnel_spi *x6spi;
147 struct hlist_node *pos; 162 int index;
148 unsigned index;
149 163
150 if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN || 164 if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN ||
151 xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX) 165 xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX)
@@ -154,32 +168,19 @@ static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
154 xfrm6_tunnel_spi++; 168 xfrm6_tunnel_spi++;
155 169
156 for (spi = xfrm6_tunnel_spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) { 170 for (spi = xfrm6_tunnel_spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
157 index = xfrm6_tunnel_spi_hash_byspi(spi); 171 index = __xfrm6_tunnel_spi_check(spi);
158 hlist_for_each_entry(x6spi, pos, 172 if (index >= 0)
159 &xfrm6_tunnel_spi_byspi[index], 173 goto alloc_spi;
160 list_byspi) {
161 if (x6spi->spi == spi)
162 goto try_next_1;
163 }
164 xfrm6_tunnel_spi = spi;
165 goto alloc_spi;
166try_next_1:;
167 } 174 }
168 for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tunnel_spi; spi++) { 175 for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tunnel_spi; spi++) {
169 index = xfrm6_tunnel_spi_hash_byspi(spi); 176 index = __xfrm6_tunnel_spi_check(spi);
170 hlist_for_each_entry(x6spi, pos, 177 if (index >= 0)
171 &xfrm6_tunnel_spi_byspi[index], 178 goto alloc_spi;
172 list_byspi) {
173 if (x6spi->spi == spi)
174 goto try_next_2;
175 }
176 xfrm6_tunnel_spi = spi;
177 goto alloc_spi;
178try_next_2:;
179 } 179 }
180 spi = 0; 180 spi = 0;
181 goto out; 181 goto out;
182alloc_spi: 182alloc_spi:
183 xfrm6_tunnel_spi = spi;
183 x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC); 184 x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
184 if (!x6spi) 185 if (!x6spi)
185 goto out; 186 goto out;
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index c76a9523091b..81ae8735f5e3 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -335,7 +335,7 @@ static int ipxitf_device_event(struct notifier_block *notifier,
335 struct net_device *dev = ptr; 335 struct net_device *dev = ptr;
336 struct ipx_interface *i, *tmp; 336 struct ipx_interface *i, *tmp;
337 337
338 if (dev->nd_net != &init_net) 338 if (dev_net(dev) != &init_net)
339 return NOTIFY_DONE; 339 return NOTIFY_DONE;
340 340
341 if (event != NETDEV_DOWN && event != NETDEV_UP) 341 if (event != NETDEV_DOWN && event != NETDEV_UP)
@@ -1636,7 +1636,7 @@ static int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
1636 u16 ipx_pktsize; 1636 u16 ipx_pktsize;
1637 int rc = 0; 1637 int rc = 0;
1638 1638
1639 if (dev->nd_net != &init_net) 1639 if (dev_net(dev) != &init_net)
1640 goto drop; 1640 goto drop;
1641 1641
1642 /* Not ours */ 1642 /* Not ours */
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 240b0cbfb532..ae54b20d0470 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -85,14 +85,14 @@ static int irda_data_indication(void *instance, void *sap, struct sk_buff *skb)
85 struct sock *sk; 85 struct sock *sk;
86 int err; 86 int err;
87 87
88 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 88 IRDA_DEBUG(3, "%s()\n", __func__);
89 89
90 self = instance; 90 self = instance;
91 sk = instance; 91 sk = instance;
92 92
93 err = sock_queue_rcv_skb(sk, skb); 93 err = sock_queue_rcv_skb(sk, skb);
94 if (err) { 94 if (err) {
95 IRDA_DEBUG(1, "%s(), error: no more mem!\n", __FUNCTION__); 95 IRDA_DEBUG(1, "%s(), error: no more mem!\n", __func__);
96 self->rx_flow = FLOW_STOP; 96 self->rx_flow = FLOW_STOP;
97 97
98 /* When we return error, TTP will need to requeue the skb */ 98 /* When we return error, TTP will need to requeue the skb */
@@ -116,7 +116,7 @@ static void irda_disconnect_indication(void *instance, void *sap,
116 116
117 self = instance; 117 self = instance;
118 118
119 IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); 119 IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
120 120
121 /* Don't care about it, but let's not leak it */ 121 /* Don't care about it, but let's not leak it */
122 if(skb) 122 if(skb)
@@ -125,7 +125,7 @@ static void irda_disconnect_indication(void *instance, void *sap,
125 sk = instance; 125 sk = instance;
126 if (sk == NULL) { 126 if (sk == NULL) {
127 IRDA_DEBUG(0, "%s(%p) : BUG : sk is NULL\n", 127 IRDA_DEBUG(0, "%s(%p) : BUG : sk is NULL\n",
128 __FUNCTION__, self); 128 __func__, self);
129 return; 129 return;
130 } 130 }
131 131
@@ -181,7 +181,7 @@ static void irda_connect_confirm(void *instance, void *sap,
181 181
182 self = instance; 182 self = instance;
183 183
184 IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); 184 IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
185 185
186 sk = instance; 186 sk = instance;
187 if (sk == NULL) { 187 if (sk == NULL) {
@@ -203,7 +203,7 @@ static void irda_connect_confirm(void *instance, void *sap,
203 case SOCK_STREAM: 203 case SOCK_STREAM:
204 if (max_sdu_size != 0) { 204 if (max_sdu_size != 0) {
205 IRDA_ERROR("%s: max_sdu_size must be 0\n", 205 IRDA_ERROR("%s: max_sdu_size must be 0\n",
206 __FUNCTION__); 206 __func__);
207 return; 207 return;
208 } 208 }
209 self->max_data_size = irttp_get_max_seg_size(self->tsap); 209 self->max_data_size = irttp_get_max_seg_size(self->tsap);
@@ -211,7 +211,7 @@ static void irda_connect_confirm(void *instance, void *sap,
211 case SOCK_SEQPACKET: 211 case SOCK_SEQPACKET:
212 if (max_sdu_size == 0) { 212 if (max_sdu_size == 0) {
213 IRDA_ERROR("%s: max_sdu_size cannot be 0\n", 213 IRDA_ERROR("%s: max_sdu_size cannot be 0\n",
214 __FUNCTION__); 214 __func__);
215 return; 215 return;
216 } 216 }
217 self->max_data_size = max_sdu_size; 217 self->max_data_size = max_sdu_size;
@@ -220,7 +220,7 @@ static void irda_connect_confirm(void *instance, void *sap,
220 self->max_data_size = irttp_get_max_seg_size(self->tsap); 220 self->max_data_size = irttp_get_max_seg_size(self->tsap);
221 } 221 }
222 222
223 IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __FUNCTION__, 223 IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__,
224 self->max_data_size); 224 self->max_data_size);
225 225
226 memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); 226 memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
@@ -245,7 +245,7 @@ static void irda_connect_indication(void *instance, void *sap,
245 245
246 self = instance; 246 self = instance;
247 247
248 IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); 248 IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
249 249
250 sk = instance; 250 sk = instance;
251 if (sk == NULL) { 251 if (sk == NULL) {
@@ -264,7 +264,7 @@ static void irda_connect_indication(void *instance, void *sap,
264 case SOCK_STREAM: 264 case SOCK_STREAM:
265 if (max_sdu_size != 0) { 265 if (max_sdu_size != 0) {
266 IRDA_ERROR("%s: max_sdu_size must be 0\n", 266 IRDA_ERROR("%s: max_sdu_size must be 0\n",
267 __FUNCTION__); 267 __func__);
268 kfree_skb(skb); 268 kfree_skb(skb);
269 return; 269 return;
270 } 270 }
@@ -273,7 +273,7 @@ static void irda_connect_indication(void *instance, void *sap,
273 case SOCK_SEQPACKET: 273 case SOCK_SEQPACKET:
274 if (max_sdu_size == 0) { 274 if (max_sdu_size == 0) {
275 IRDA_ERROR("%s: max_sdu_size cannot be 0\n", 275 IRDA_ERROR("%s: max_sdu_size cannot be 0\n",
276 __FUNCTION__); 276 __func__);
277 kfree_skb(skb); 277 kfree_skb(skb);
278 return; 278 return;
279 } 279 }
@@ -283,7 +283,7 @@ static void irda_connect_indication(void *instance, void *sap,
283 self->max_data_size = irttp_get_max_seg_size(self->tsap); 283 self->max_data_size = irttp_get_max_seg_size(self->tsap);
284 } 284 }
285 285
286 IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __FUNCTION__, 286 IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__,
287 self->max_data_size); 287 self->max_data_size);
288 288
289 memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); 289 memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
@@ -302,13 +302,13 @@ static void irda_connect_response(struct irda_sock *self)
302{ 302{
303 struct sk_buff *skb; 303 struct sk_buff *skb;
304 304
305 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 305 IRDA_DEBUG(2, "%s()\n", __func__);
306 306
307 skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER, 307 skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER,
308 GFP_ATOMIC); 308 GFP_ATOMIC);
309 if (skb == NULL) { 309 if (skb == NULL) {
310 IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n", 310 IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n",
311 __FUNCTION__); 311 __func__);
312 return; 312 return;
313 } 313 }
314 314
@@ -329,7 +329,7 @@ static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
329 struct irda_sock *self; 329 struct irda_sock *self;
330 struct sock *sk; 330 struct sock *sk;
331 331
332 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 332 IRDA_DEBUG(2, "%s()\n", __func__);
333 333
334 self = instance; 334 self = instance;
335 sk = instance; 335 sk = instance;
@@ -338,17 +338,17 @@ static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
338 switch (flow) { 338 switch (flow) {
339 case FLOW_STOP: 339 case FLOW_STOP:
340 IRDA_DEBUG(1, "%s(), IrTTP wants us to slow down\n", 340 IRDA_DEBUG(1, "%s(), IrTTP wants us to slow down\n",
341 __FUNCTION__); 341 __func__);
342 self->tx_flow = flow; 342 self->tx_flow = flow;
343 break; 343 break;
344 case FLOW_START: 344 case FLOW_START:
345 self->tx_flow = flow; 345 self->tx_flow = flow;
346 IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n", 346 IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n",
347 __FUNCTION__); 347 __func__);
348 wake_up_interruptible(sk->sk_sleep); 348 wake_up_interruptible(sk->sk_sleep);
349 break; 349 break;
350 default: 350 default:
351 IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __FUNCTION__); 351 IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__);
352 /* Unknown flow command, better stop */ 352 /* Unknown flow command, better stop */
353 self->tx_flow = flow; 353 self->tx_flow = flow;
354 break; 354 break;
@@ -370,11 +370,11 @@ static void irda_getvalue_confirm(int result, __u16 obj_id,
370 370
371 self = (struct irda_sock *) priv; 371 self = (struct irda_sock *) priv;
372 if (!self) { 372 if (!self) {
373 IRDA_WARNING("%s: lost myself!\n", __FUNCTION__); 373 IRDA_WARNING("%s: lost myself!\n", __func__);
374 return; 374 return;
375 } 375 }
376 376
377 IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); 377 IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
378 378
379 /* We probably don't need to make any more queries */ 379 /* We probably don't need to make any more queries */
380 iriap_close(self->iriap); 380 iriap_close(self->iriap);
@@ -382,7 +382,7 @@ static void irda_getvalue_confirm(int result, __u16 obj_id,
382 382
383 /* Check if request succeeded */ 383 /* Check if request succeeded */
384 if (result != IAS_SUCCESS) { 384 if (result != IAS_SUCCESS) {
385 IRDA_DEBUG(1, "%s(), IAS query failed! (%d)\n", __FUNCTION__, 385 IRDA_DEBUG(1, "%s(), IAS query failed! (%d)\n", __func__,
386 result); 386 result);
387 387
388 self->errno = result; /* We really need it later */ 388 self->errno = result; /* We really need it later */
@@ -415,11 +415,11 @@ static void irda_selective_discovery_indication(discinfo_t *discovery,
415{ 415{
416 struct irda_sock *self; 416 struct irda_sock *self;
417 417
418 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 418 IRDA_DEBUG(2, "%s()\n", __func__);
419 419
420 self = (struct irda_sock *) priv; 420 self = (struct irda_sock *) priv;
421 if (!self) { 421 if (!self) {
422 IRDA_WARNING("%s: lost myself!\n", __FUNCTION__); 422 IRDA_WARNING("%s: lost myself!\n", __func__);
423 return; 423 return;
424 } 424 }
425 425
@@ -442,7 +442,7 @@ static void irda_discovery_timeout(u_long priv)
442{ 442{
443 struct irda_sock *self; 443 struct irda_sock *self;
444 444
445 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 445 IRDA_DEBUG(2, "%s()\n", __func__);
446 446
447 self = (struct irda_sock *) priv; 447 self = (struct irda_sock *) priv;
448 BUG_ON(self == NULL); 448 BUG_ON(self == NULL);
@@ -467,7 +467,7 @@ static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name)
467 notify_t notify; 467 notify_t notify;
468 468
469 if (self->tsap) { 469 if (self->tsap) {
470 IRDA_WARNING("%s: busy!\n", __FUNCTION__); 470 IRDA_WARNING("%s: busy!\n", __func__);
471 return -EBUSY; 471 return -EBUSY;
472 } 472 }
473 473
@@ -486,7 +486,7 @@ static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name)
486 &notify); 486 &notify);
487 if (self->tsap == NULL) { 487 if (self->tsap == NULL) {
488 IRDA_DEBUG(0, "%s(), Unable to allocate TSAP!\n", 488 IRDA_DEBUG(0, "%s(), Unable to allocate TSAP!\n",
489 __FUNCTION__); 489 __func__);
490 return -ENOMEM; 490 return -ENOMEM;
491 } 491 }
492 /* Remember which TSAP selector we actually got */ 492 /* Remember which TSAP selector we actually got */
@@ -507,7 +507,7 @@ static int irda_open_lsap(struct irda_sock *self, int pid)
507 notify_t notify; 507 notify_t notify;
508 508
509 if (self->lsap) { 509 if (self->lsap) {
510 IRDA_WARNING("%s(), busy!\n", __FUNCTION__); 510 IRDA_WARNING("%s(), busy!\n", __func__);
511 return -EBUSY; 511 return -EBUSY;
512 } 512 }
513 513
@@ -519,7 +519,7 @@ static int irda_open_lsap(struct irda_sock *self, int pid)
519 519
520 self->lsap = irlmp_open_lsap(LSAP_CONNLESS, &notify, pid); 520 self->lsap = irlmp_open_lsap(LSAP_CONNLESS, &notify, pid);
521 if (self->lsap == NULL) { 521 if (self->lsap == NULL) {
522 IRDA_DEBUG( 0, "%s(), Unable to allocate LSAP!\n", __FUNCTION__); 522 IRDA_DEBUG( 0, "%s(), Unable to allocate LSAP!\n", __func__);
523 return -ENOMEM; 523 return -ENOMEM;
524 } 524 }
525 525
@@ -540,11 +540,11 @@ static int irda_open_lsap(struct irda_sock *self, int pid)
540 */ 540 */
541static int irda_find_lsap_sel(struct irda_sock *self, char *name) 541static int irda_find_lsap_sel(struct irda_sock *self, char *name)
542{ 542{
543 IRDA_DEBUG(2, "%s(%p, %s)\n", __FUNCTION__, self, name); 543 IRDA_DEBUG(2, "%s(%p, %s)\n", __func__, self, name);
544 544
545 if (self->iriap) { 545 if (self->iriap) {
546 IRDA_WARNING("%s(): busy with a previous query\n", 546 IRDA_WARNING("%s(): busy with a previous query\n",
547 __FUNCTION__); 547 __func__);
548 return -EBUSY; 548 return -EBUSY;
549 } 549 }
550 550
@@ -580,7 +580,7 @@ static int irda_find_lsap_sel(struct irda_sock *self, char *name)
580 switch (self->ias_result->type) { 580 switch (self->ias_result->type) {
581 case IAS_INTEGER: 581 case IAS_INTEGER:
582 IRDA_DEBUG(4, "%s() int=%d\n", 582 IRDA_DEBUG(4, "%s() int=%d\n",
583 __FUNCTION__, self->ias_result->t.integer); 583 __func__, self->ias_result->t.integer);
584 584
585 if (self->ias_result->t.integer != -1) 585 if (self->ias_result->t.integer != -1)
586 self->dtsap_sel = self->ias_result->t.integer; 586 self->dtsap_sel = self->ias_result->t.integer;
@@ -589,7 +589,7 @@ static int irda_find_lsap_sel(struct irda_sock *self, char *name)
589 break; 589 break;
590 default: 590 default:
591 self->dtsap_sel = 0; 591 self->dtsap_sel = 0;
592 IRDA_DEBUG(0, "%s(), bad type!\n", __FUNCTION__); 592 IRDA_DEBUG(0, "%s(), bad type!\n", __func__);
593 break; 593 break;
594 } 594 }
595 if (self->ias_result) 595 if (self->ias_result)
@@ -627,7 +627,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
627 __u32 daddr = DEV_ADDR_ANY; /* Address we found the service on */ 627 __u32 daddr = DEV_ADDR_ANY; /* Address we found the service on */
628 __u8 dtsap_sel = 0x0; /* TSAP associated with it */ 628 __u8 dtsap_sel = 0x0; /* TSAP associated with it */
629 629
630 IRDA_DEBUG(2, "%s(), name=%s\n", __FUNCTION__, name); 630 IRDA_DEBUG(2, "%s(), name=%s\n", __func__, name);
631 631
632 /* Ask lmp for the current discovery log 632 /* Ask lmp for the current discovery log
633 * Note : we have to use irlmp_get_discoveries(), as opposed 633 * Note : we have to use irlmp_get_discoveries(), as opposed
@@ -649,7 +649,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
649 self->daddr = discoveries[i].daddr; 649 self->daddr = discoveries[i].daddr;
650 self->saddr = 0x0; 650 self->saddr = 0x0;
651 IRDA_DEBUG(1, "%s(), trying daddr = %08x\n", 651 IRDA_DEBUG(1, "%s(), trying daddr = %08x\n",
652 __FUNCTION__, self->daddr); 652 __func__, self->daddr);
653 653
654 /* Query remote LM-IAS for this service */ 654 /* Query remote LM-IAS for this service */
655 err = irda_find_lsap_sel(self, name); 655 err = irda_find_lsap_sel(self, name);
@@ -658,7 +658,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
658 /* We found the requested service */ 658 /* We found the requested service */
659 if(daddr != DEV_ADDR_ANY) { 659 if(daddr != DEV_ADDR_ANY) {
660 IRDA_DEBUG(1, "%s(), discovered service ''%s'' in two different devices !!!\n", 660 IRDA_DEBUG(1, "%s(), discovered service ''%s'' in two different devices !!!\n",
661 __FUNCTION__, name); 661 __func__, name);
662 self->daddr = DEV_ADDR_ANY; 662 self->daddr = DEV_ADDR_ANY;
663 kfree(discoveries); 663 kfree(discoveries);
664 return(-ENOTUNIQ); 664 return(-ENOTUNIQ);
@@ -672,7 +672,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
672 break; 672 break;
673 default: 673 default:
674 /* Something bad did happen :-( */ 674 /* Something bad did happen :-( */
675 IRDA_DEBUG(0, "%s(), unexpected IAS query failure\n", __FUNCTION__); 675 IRDA_DEBUG(0, "%s(), unexpected IAS query failure\n", __func__);
676 self->daddr = DEV_ADDR_ANY; 676 self->daddr = DEV_ADDR_ANY;
677 kfree(discoveries); 677 kfree(discoveries);
678 return(-EHOSTUNREACH); 678 return(-EHOSTUNREACH);
@@ -685,7 +685,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
685 /* Check out what we found */ 685 /* Check out what we found */
686 if(daddr == DEV_ADDR_ANY) { 686 if(daddr == DEV_ADDR_ANY) {
687 IRDA_DEBUG(1, "%s(), cannot discover service ''%s'' in any device !!!\n", 687 IRDA_DEBUG(1, "%s(), cannot discover service ''%s'' in any device !!!\n",
688 __FUNCTION__, name); 688 __func__, name);
689 self->daddr = DEV_ADDR_ANY; 689 self->daddr = DEV_ADDR_ANY;
690 return(-EADDRNOTAVAIL); 690 return(-EADDRNOTAVAIL);
691 } 691 }
@@ -696,7 +696,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
696 self->dtsap_sel = dtsap_sel; 696 self->dtsap_sel = dtsap_sel;
697 697
698 IRDA_DEBUG(1, "%s(), discovered requested service ''%s'' at address %08x\n", 698 IRDA_DEBUG(1, "%s(), discovered requested service ''%s'' at address %08x\n",
699 __FUNCTION__, name, self->daddr); 699 __func__, name, self->daddr);
700 700
701 return 0; 701 return 0;
702} 702}
@@ -727,8 +727,8 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
727 saddr.sir_addr = self->saddr; 727 saddr.sir_addr = self->saddr;
728 } 728 }
729 729
730 IRDA_DEBUG(1, "%s(), tsap_sel = %#x\n", __FUNCTION__, saddr.sir_lsap_sel); 730 IRDA_DEBUG(1, "%s(), tsap_sel = %#x\n", __func__, saddr.sir_lsap_sel);
731 IRDA_DEBUG(1, "%s(), addr = %08x\n", __FUNCTION__, saddr.sir_addr); 731 IRDA_DEBUG(1, "%s(), addr = %08x\n", __func__, saddr.sir_addr);
732 732
733 /* uaddr_len come to us uninitialised */ 733 /* uaddr_len come to us uninitialised */
734 *uaddr_len = sizeof (struct sockaddr_irda); 734 *uaddr_len = sizeof (struct sockaddr_irda);
@@ -747,7 +747,7 @@ static int irda_listen(struct socket *sock, int backlog)
747{ 747{
748 struct sock *sk = sock->sk; 748 struct sock *sk = sock->sk;
749 749
750 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 750 IRDA_DEBUG(2, "%s()\n", __func__);
751 751
752 if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && 752 if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
753 (sk->sk_type != SOCK_DGRAM)) 753 (sk->sk_type != SOCK_DGRAM))
@@ -776,7 +776,7 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
776 struct irda_sock *self = irda_sk(sk); 776 struct irda_sock *self = irda_sk(sk);
777 int err; 777 int err;
778 778
779 IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); 779 IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
780 780
781 if (addr_len != sizeof(struct sockaddr_irda)) 781 if (addr_len != sizeof(struct sockaddr_irda))
782 return -EINVAL; 782 return -EINVAL;
@@ -787,7 +787,7 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
787 (sk->sk_protocol == IRDAPROTO_ULTRA)) { 787 (sk->sk_protocol == IRDAPROTO_ULTRA)) {
788 self->pid = addr->sir_lsap_sel; 788 self->pid = addr->sir_lsap_sel;
789 if (self->pid & 0x80) { 789 if (self->pid & 0x80) {
790 IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __FUNCTION__); 790 IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__);
791 return -EOPNOTSUPP; 791 return -EOPNOTSUPP;
792 } 792 }
793 err = irda_open_lsap(self, self->pid); 793 err = irda_open_lsap(self, self->pid);
@@ -835,9 +835,9 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
835 struct sk_buff *skb; 835 struct sk_buff *skb;
836 int err; 836 int err;
837 837
838 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 838 IRDA_DEBUG(2, "%s()\n", __func__);
839 839
840 err = irda_create(sk->sk_net, newsock, sk->sk_protocol); 840 err = irda_create(sock_net(sk), newsock, sk->sk_protocol);
841 if (err) 841 if (err)
842 return err; 842 return err;
843 843
@@ -893,7 +893,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
893 /* Now attach up the new socket */ 893 /* Now attach up the new socket */
894 new->tsap = irttp_dup(self->tsap, new); 894 new->tsap = irttp_dup(self->tsap, new);
895 if (!new->tsap) { 895 if (!new->tsap) {
896 IRDA_DEBUG(0, "%s(), dup failed!\n", __FUNCTION__); 896 IRDA_DEBUG(0, "%s(), dup failed!\n", __func__);
897 kfree_skb(skb); 897 kfree_skb(skb);
898 return -1; 898 return -1;
899 } 899 }
@@ -954,7 +954,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
954 struct irda_sock *self = irda_sk(sk); 954 struct irda_sock *self = irda_sk(sk);
955 int err; 955 int err;
956 956
957 IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); 957 IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
958 958
959 /* Don't allow connect for Ultra sockets */ 959 /* Don't allow connect for Ultra sockets */
960 if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA)) 960 if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA))
@@ -984,13 +984,13 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
984 /* Try to find one suitable */ 984 /* Try to find one suitable */
985 err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name); 985 err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name);
986 if (err) { 986 if (err) {
987 IRDA_DEBUG(0, "%s(), auto-connect failed!\n", __FUNCTION__); 987 IRDA_DEBUG(0, "%s(), auto-connect failed!\n", __func__);
988 return err; 988 return err;
989 } 989 }
990 } else { 990 } else {
991 /* Use the one provided by the user */ 991 /* Use the one provided by the user */
992 self->daddr = addr->sir_addr; 992 self->daddr = addr->sir_addr;
993 IRDA_DEBUG(1, "%s(), daddr = %08x\n", __FUNCTION__, self->daddr); 993 IRDA_DEBUG(1, "%s(), daddr = %08x\n", __func__, self->daddr);
994 994
995 /* If we don't have a valid service name, we assume the 995 /* If we don't have a valid service name, we assume the
996 * user want to connect on a specific LSAP. Prevent 996 * user want to connect on a specific LSAP. Prevent
@@ -1000,7 +1000,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
1000 /* Query remote LM-IAS using service name */ 1000 /* Query remote LM-IAS using service name */
1001 err = irda_find_lsap_sel(self, addr->sir_name); 1001 err = irda_find_lsap_sel(self, addr->sir_name);
1002 if (err) { 1002 if (err) {
1003 IRDA_DEBUG(0, "%s(), connect failed!\n", __FUNCTION__); 1003 IRDA_DEBUG(0, "%s(), connect failed!\n", __func__);
1004 return err; 1004 return err;
1005 } 1005 }
1006 } else { 1006 } else {
@@ -1025,7 +1025,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
1025 self->saddr, self->daddr, NULL, 1025 self->saddr, self->daddr, NULL,
1026 self->max_sdu_size_rx, NULL); 1026 self->max_sdu_size_rx, NULL);
1027 if (err) { 1027 if (err) {
1028 IRDA_DEBUG(0, "%s(), connect failed!\n", __FUNCTION__); 1028 IRDA_DEBUG(0, "%s(), connect failed!\n", __func__);
1029 return err; 1029 return err;
1030 } 1030 }
1031 1031
@@ -1068,7 +1068,7 @@ static int irda_create(struct net *net, struct socket *sock, int protocol)
1068 struct sock *sk; 1068 struct sock *sk;
1069 struct irda_sock *self; 1069 struct irda_sock *self;
1070 1070
1071 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 1071 IRDA_DEBUG(2, "%s()\n", __func__);
1072 1072
1073 if (net != &init_net) 1073 if (net != &init_net)
1074 return -EAFNOSUPPORT; 1074 return -EAFNOSUPPORT;
@@ -1089,7 +1089,7 @@ static int irda_create(struct net *net, struct socket *sock, int protocol)
1089 return -ENOMEM; 1089 return -ENOMEM;
1090 1090
1091 self = irda_sk(sk); 1091 self = irda_sk(sk);
1092 IRDA_DEBUG(2, "%s() : self is %p\n", __FUNCTION__, self); 1092 IRDA_DEBUG(2, "%s() : self is %p\n", __func__, self);
1093 1093
1094 init_waitqueue_head(&self->query_wait); 1094 init_waitqueue_head(&self->query_wait);
1095 1095
@@ -1149,7 +1149,7 @@ static int irda_create(struct net *net, struct socket *sock, int protocol)
1149 */ 1149 */
1150static void irda_destroy_socket(struct irda_sock *self) 1150static void irda_destroy_socket(struct irda_sock *self)
1151{ 1151{
1152 IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); 1152 IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
1153 1153
1154 /* Unregister with IrLMP */ 1154 /* Unregister with IrLMP */
1155 irlmp_unregister_client(self->ckey); 1155 irlmp_unregister_client(self->ckey);
@@ -1186,7 +1186,7 @@ static int irda_release(struct socket *sock)
1186{ 1186{
1187 struct sock *sk = sock->sk; 1187 struct sock *sk = sock->sk;
1188 1188
1189 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 1189 IRDA_DEBUG(2, "%s()\n", __func__);
1190 1190
1191 if (sk == NULL) 1191 if (sk == NULL)
1192 return 0; 1192 return 0;
@@ -1254,7 +1254,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
1254 struct sk_buff *skb; 1254 struct sk_buff *skb;
1255 int err = -EPIPE; 1255 int err = -EPIPE;
1256 1256
1257 IRDA_DEBUG(4, "%s(), len=%zd\n", __FUNCTION__, len); 1257 IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len);
1258 1258
1259 /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ 1259 /* Note : socket.c set MSG_EOR on SEQPACKET sockets */
1260 if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | 1260 if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT |
@@ -1282,7 +1282,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
1282 /* Check that we don't send out too big frames */ 1282 /* Check that we don't send out too big frames */
1283 if (len > self->max_data_size) { 1283 if (len > self->max_data_size) {
1284 IRDA_DEBUG(2, "%s(), Chopping frame from %zd to %d bytes!\n", 1284 IRDA_DEBUG(2, "%s(), Chopping frame from %zd to %d bytes!\n",
1285 __FUNCTION__, len, self->max_data_size); 1285 __func__, len, self->max_data_size);
1286 len = self->max_data_size; 1286 len = self->max_data_size;
1287 } 1287 }
1288 1288
@@ -1306,7 +1306,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
1306 */ 1306 */
1307 err = irttp_data_request(self->tsap, skb); 1307 err = irttp_data_request(self->tsap, skb);
1308 if (err) { 1308 if (err) {
1309 IRDA_DEBUG(0, "%s(), err=%d\n", __FUNCTION__, err); 1309 IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err);
1310 goto out_err; 1310 goto out_err;
1311 } 1311 }
1312 /* Tell client how much data we actually sent */ 1312 /* Tell client how much data we actually sent */
@@ -1332,7 +1332,7 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
1332 size_t copied; 1332 size_t copied;
1333 int err; 1333 int err;
1334 1334
1335 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 1335 IRDA_DEBUG(4, "%s()\n", __func__);
1336 1336
1337 if ((err = sock_error(sk)) < 0) 1337 if ((err = sock_error(sk)) < 0)
1338 return err; 1338 return err;
@@ -1347,7 +1347,7 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
1347 1347
1348 if (copied > size) { 1348 if (copied > size) {
1349 IRDA_DEBUG(2, "%s(), Received truncated frame (%zd < %zd)!\n", 1349 IRDA_DEBUG(2, "%s(), Received truncated frame (%zd < %zd)!\n",
1350 __FUNCTION__, copied, size); 1350 __func__, copied, size);
1351 copied = size; 1351 copied = size;
1352 msg->msg_flags |= MSG_TRUNC; 1352 msg->msg_flags |= MSG_TRUNC;
1353 } 1353 }
@@ -1363,7 +1363,7 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
1363 */ 1363 */
1364 if (self->rx_flow == FLOW_STOP) { 1364 if (self->rx_flow == FLOW_STOP) {
1365 if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { 1365 if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) {
1366 IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __FUNCTION__); 1366 IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__);
1367 self->rx_flow = FLOW_START; 1367 self->rx_flow = FLOW_START;
1368 irttp_flow_request(self->tsap, FLOW_START); 1368 irttp_flow_request(self->tsap, FLOW_START);
1369 } 1369 }
@@ -1385,7 +1385,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
1385 int target, err; 1385 int target, err;
1386 long timeo; 1386 long timeo;
1387 1387
1388 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 1388 IRDA_DEBUG(3, "%s()\n", __func__);
1389 1389
1390 if ((err = sock_error(sk)) < 0) 1390 if ((err = sock_error(sk)) < 0)
1391 return err; 1391 return err;
@@ -1459,14 +1459,14 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
1459 /* put the skb back if we didn't use it up.. */ 1459 /* put the skb back if we didn't use it up.. */
1460 if (skb->len) { 1460 if (skb->len) {
1461 IRDA_DEBUG(1, "%s(), back on q!\n", 1461 IRDA_DEBUG(1, "%s(), back on q!\n",
1462 __FUNCTION__); 1462 __func__);
1463 skb_queue_head(&sk->sk_receive_queue, skb); 1463 skb_queue_head(&sk->sk_receive_queue, skb);
1464 break; 1464 break;
1465 } 1465 }
1466 1466
1467 kfree_skb(skb); 1467 kfree_skb(skb);
1468 } else { 1468 } else {
1469 IRDA_DEBUG(0, "%s() questionable!?\n", __FUNCTION__); 1469 IRDA_DEBUG(0, "%s() questionable!?\n", __func__);
1470 1470
1471 /* put message back and return */ 1471 /* put message back and return */
1472 skb_queue_head(&sk->sk_receive_queue, skb); 1472 skb_queue_head(&sk->sk_receive_queue, skb);
@@ -1482,7 +1482,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
1482 */ 1482 */
1483 if (self->rx_flow == FLOW_STOP) { 1483 if (self->rx_flow == FLOW_STOP) {
1484 if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { 1484 if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) {
1485 IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __FUNCTION__); 1485 IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__);
1486 self->rx_flow = FLOW_START; 1486 self->rx_flow = FLOW_START;
1487 irttp_flow_request(self->tsap, FLOW_START); 1487 irttp_flow_request(self->tsap, FLOW_START);
1488 } 1488 }
@@ -1506,7 +1506,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
1506 struct sk_buff *skb; 1506 struct sk_buff *skb;
1507 int err; 1507 int err;
1508 1508
1509 IRDA_DEBUG(4, "%s(), len=%zd\n", __FUNCTION__, len); 1509 IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len);
1510 1510
1511 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) 1511 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
1512 return -EINVAL; 1512 return -EINVAL;
@@ -1528,7 +1528,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
1528 if (len > self->max_data_size) { 1528 if (len > self->max_data_size) {
1529 IRDA_DEBUG(0, "%s(), Warning to much data! " 1529 IRDA_DEBUG(0, "%s(), Warning to much data! "
1530 "Chopping frame from %zd to %d bytes!\n", 1530 "Chopping frame from %zd to %d bytes!\n",
1531 __FUNCTION__, len, self->max_data_size); 1531 __func__, len, self->max_data_size);
1532 len = self->max_data_size; 1532 len = self->max_data_size;
1533 } 1533 }
1534 1534
@@ -1540,7 +1540,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
1540 skb_reserve(skb, self->max_header_size); 1540 skb_reserve(skb, self->max_header_size);
1541 skb_reset_transport_header(skb); 1541 skb_reset_transport_header(skb);
1542 1542
1543 IRDA_DEBUG(4, "%s(), appending user data\n", __FUNCTION__); 1543 IRDA_DEBUG(4, "%s(), appending user data\n", __func__);
1544 skb_put(skb, len); 1544 skb_put(skb, len);
1545 err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); 1545 err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
1546 if (err) { 1546 if (err) {
@@ -1554,7 +1554,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
1554 */ 1554 */
1555 err = irttp_udata_request(self->tsap, skb); 1555 err = irttp_udata_request(self->tsap, skb);
1556 if (err) { 1556 if (err) {
1557 IRDA_DEBUG(0, "%s(), err=%d\n", __FUNCTION__, err); 1557 IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err);
1558 return err; 1558 return err;
1559 } 1559 }
1560 return len; 1560 return len;
@@ -1577,7 +1577,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
1577 struct sk_buff *skb; 1577 struct sk_buff *skb;
1578 int err; 1578 int err;
1579 1579
1580 IRDA_DEBUG(4, "%s(), len=%zd\n", __FUNCTION__, len); 1580 IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len);
1581 1581
1582 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) 1582 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
1583 return -EINVAL; 1583 return -EINVAL;
@@ -1600,7 +1600,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
1600 1600
1601 pid = addr->sir_lsap_sel; 1601 pid = addr->sir_lsap_sel;
1602 if (pid & 0x80) { 1602 if (pid & 0x80) {
1603 IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __FUNCTION__); 1603 IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__);
1604 return -EOPNOTSUPP; 1604 return -EOPNOTSUPP;
1605 } 1605 }
1606 } else { 1606 } else {
@@ -1609,7 +1609,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
1609 if ((self->lsap == NULL) || 1609 if ((self->lsap == NULL) ||
1610 (sk->sk_state != TCP_ESTABLISHED)) { 1610 (sk->sk_state != TCP_ESTABLISHED)) {
1611 IRDA_DEBUG(0, "%s(), socket not bound to Ultra PID.\n", 1611 IRDA_DEBUG(0, "%s(), socket not bound to Ultra PID.\n",
1612 __FUNCTION__); 1612 __func__);
1613 return -ENOTCONN; 1613 return -ENOTCONN;
1614 } 1614 }
1615 /* Use PID from socket */ 1615 /* Use PID from socket */
@@ -1623,7 +1623,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
1623 if (len > self->max_data_size) { 1623 if (len > self->max_data_size) {
1624 IRDA_DEBUG(0, "%s(), Warning to much data! " 1624 IRDA_DEBUG(0, "%s(), Warning to much data! "
1625 "Chopping frame from %zd to %d bytes!\n", 1625 "Chopping frame from %zd to %d bytes!\n",
1626 __FUNCTION__, len, self->max_data_size); 1626 __func__, len, self->max_data_size);
1627 len = self->max_data_size; 1627 len = self->max_data_size;
1628 } 1628 }
1629 1629
@@ -1635,7 +1635,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
1635 skb_reserve(skb, self->max_header_size); 1635 skb_reserve(skb, self->max_header_size);
1636 skb_reset_transport_header(skb); 1636 skb_reset_transport_header(skb);
1637 1637
1638 IRDA_DEBUG(4, "%s(), appending user data\n", __FUNCTION__); 1638 IRDA_DEBUG(4, "%s(), appending user data\n", __func__);
1639 skb_put(skb, len); 1639 skb_put(skb, len);
1640 err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); 1640 err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
1641 if (err) { 1641 if (err) {
@@ -1646,7 +1646,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
1646 err = irlmp_connless_data_request((bound ? self->lsap : NULL), 1646 err = irlmp_connless_data_request((bound ? self->lsap : NULL),
1647 skb, pid); 1647 skb, pid);
1648 if (err) { 1648 if (err) {
1649 IRDA_DEBUG(0, "%s(), err=%d\n", __FUNCTION__, err); 1649 IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err);
1650 return err; 1650 return err;
1651 } 1651 }
1652 return len; 1652 return len;
@@ -1661,7 +1661,7 @@ static int irda_shutdown(struct socket *sock, int how)
1661 struct sock *sk = sock->sk; 1661 struct sock *sk = sock->sk;
1662 struct irda_sock *self = irda_sk(sk); 1662 struct irda_sock *self = irda_sk(sk);
1663 1663
1664 IRDA_DEBUG(1, "%s(%p)\n", __FUNCTION__, self); 1664 IRDA_DEBUG(1, "%s(%p)\n", __func__, self);
1665 1665
1666 sk->sk_state = TCP_CLOSE; 1666 sk->sk_state = TCP_CLOSE;
1667 sk->sk_shutdown |= SEND_SHUTDOWN; 1667 sk->sk_shutdown |= SEND_SHUTDOWN;
@@ -1696,7 +1696,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
1696 struct irda_sock *self = irda_sk(sk); 1696 struct irda_sock *self = irda_sk(sk);
1697 unsigned int mask; 1697 unsigned int mask;
1698 1698
1699 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 1699 IRDA_DEBUG(4, "%s()\n", __func__);
1700 1700
1701 poll_wait(file, sk->sk_sleep, wait); 1701 poll_wait(file, sk->sk_sleep, wait);
1702 mask = 0; 1702 mask = 0;
@@ -1705,7 +1705,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
1705 if (sk->sk_err) 1705 if (sk->sk_err)
1706 mask |= POLLERR; 1706 mask |= POLLERR;
1707 if (sk->sk_shutdown & RCV_SHUTDOWN) { 1707 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1708 IRDA_DEBUG(0, "%s(), POLLHUP\n", __FUNCTION__); 1708 IRDA_DEBUG(0, "%s(), POLLHUP\n", __func__);
1709 mask |= POLLHUP; 1709 mask |= POLLHUP;
1710 } 1710 }
1711 1711
@@ -1719,7 +1719,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
1719 switch (sk->sk_type) { 1719 switch (sk->sk_type) {
1720 case SOCK_STREAM: 1720 case SOCK_STREAM:
1721 if (sk->sk_state == TCP_CLOSE) { 1721 if (sk->sk_state == TCP_CLOSE) {
1722 IRDA_DEBUG(0, "%s(), POLLHUP\n", __FUNCTION__); 1722 IRDA_DEBUG(0, "%s(), POLLHUP\n", __func__);
1723 mask |= POLLHUP; 1723 mask |= POLLHUP;
1724 } 1724 }
1725 1725
@@ -1755,7 +1755,7 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1755{ 1755{
1756 struct sock *sk = sock->sk; 1756 struct sock *sk = sock->sk;
1757 1757
1758 IRDA_DEBUG(4, "%s(), cmd=%#x\n", __FUNCTION__, cmd); 1758 IRDA_DEBUG(4, "%s(), cmd=%#x\n", __func__, cmd);
1759 1759
1760 switch (cmd) { 1760 switch (cmd) {
1761 case TIOCOUTQ: { 1761 case TIOCOUTQ: {
@@ -1796,7 +1796,7 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1796 case SIOCSIFMETRIC: 1796 case SIOCSIFMETRIC:
1797 return -EINVAL; 1797 return -EINVAL;
1798 default: 1798 default:
1799 IRDA_DEBUG(1, "%s(), doing device ioctl!\n", __FUNCTION__); 1799 IRDA_DEBUG(1, "%s(), doing device ioctl!\n", __func__);
1800 return -ENOIOCTLCMD; 1800 return -ENOIOCTLCMD;
1801 } 1801 }
1802 1802
@@ -1833,7 +1833,7 @@ static int irda_setsockopt(struct socket *sock, int level, int optname,
1833 struct ias_attrib * ias_attr; /* Attribute in IAS object */ 1833 struct ias_attrib * ias_attr; /* Attribute in IAS object */
1834 int opt, free_ias = 0; 1834 int opt, free_ias = 0;
1835 1835
1836 IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); 1836 IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
1837 1837
1838 if (level != SOL_IRLMP) 1838 if (level != SOL_IRLMP)
1839 return -ENOPROTOOPT; 1839 return -ENOPROTOOPT;
@@ -2012,7 +2012,7 @@ static int irda_setsockopt(struct socket *sock, int level, int optname,
2012 2012
2013 /* Check is the user space own the object */ 2013 /* Check is the user space own the object */
2014 if(ias_attr->value->owner != IAS_USER_ATTR) { 2014 if(ias_attr->value->owner != IAS_USER_ATTR) {
2015 IRDA_DEBUG(1, "%s(), attempting to delete a kernel attribute\n", __FUNCTION__); 2015 IRDA_DEBUG(1, "%s(), attempting to delete a kernel attribute\n", __func__);
2016 kfree(ias_opt); 2016 kfree(ias_opt);
2017 return -EPERM; 2017 return -EPERM;
2018 } 2018 }
@@ -2031,11 +2031,11 @@ static int irda_setsockopt(struct socket *sock, int level, int optname,
2031 /* Only possible for a seqpacket service (TTP with SAR) */ 2031 /* Only possible for a seqpacket service (TTP with SAR) */
2032 if (sk->sk_type != SOCK_SEQPACKET) { 2032 if (sk->sk_type != SOCK_SEQPACKET) {
2033 IRDA_DEBUG(2, "%s(), setting max_sdu_size = %d\n", 2033 IRDA_DEBUG(2, "%s(), setting max_sdu_size = %d\n",
2034 __FUNCTION__, opt); 2034 __func__, opt);
2035 self->max_sdu_size_rx = opt; 2035 self->max_sdu_size_rx = opt;
2036 } else { 2036 } else {
2037 IRDA_WARNING("%s: not allowed to set MAXSDUSIZE for this socket type!\n", 2037 IRDA_WARNING("%s: not allowed to set MAXSDUSIZE for this socket type!\n",
2038 __FUNCTION__); 2038 __func__);
2039 return -ENOPROTOOPT; 2039 return -ENOPROTOOPT;
2040 } 2040 }
2041 break; 2041 break;
@@ -2149,7 +2149,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
2149 int err; 2149 int err;
2150 int offset, total; 2150 int offset, total;
2151 2151
2152 IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); 2152 IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
2153 2153
2154 if (level != SOL_IRLMP) 2154 if (level != SOL_IRLMP)
2155 return -ENOPROTOOPT; 2155 return -ENOPROTOOPT;
@@ -2310,7 +2310,7 @@ bed:
2310 /* Check that we can proceed with IAP */ 2310 /* Check that we can proceed with IAP */
2311 if (self->iriap) { 2311 if (self->iriap) {
2312 IRDA_WARNING("%s: busy with a previous query\n", 2312 IRDA_WARNING("%s: busy with a previous query\n",
2313 __FUNCTION__); 2313 __func__);
2314 kfree(ias_opt); 2314 kfree(ias_opt);
2315 return -EBUSY; 2315 return -EBUSY;
2316 } 2316 }
@@ -2406,7 +2406,7 @@ bed:
2406 if (!self->cachedaddr) { 2406 if (!self->cachedaddr) {
2407 int ret = 0; 2407 int ret = 0;
2408 2408
2409 IRDA_DEBUG(1, "%s(), nothing discovered yet, going to sleep...\n", __FUNCTION__); 2409 IRDA_DEBUG(1, "%s(), nothing discovered yet, going to sleep...\n", __func__);
2410 2410
2411 /* Set watchdog timer to expire in <val> ms. */ 2411 /* Set watchdog timer to expire in <val> ms. */
2412 self->errno = 0; 2412 self->errno = 0;
@@ -2424,14 +2424,14 @@ bed:
2424 if(timer_pending(&(self->watchdog))) 2424 if(timer_pending(&(self->watchdog)))
2425 del_timer(&(self->watchdog)); 2425 del_timer(&(self->watchdog));
2426 2426
2427 IRDA_DEBUG(1, "%s(), ...waking up !\n", __FUNCTION__); 2427 IRDA_DEBUG(1, "%s(), ...waking up !\n", __func__);
2428 2428
2429 if (ret != 0) 2429 if (ret != 0)
2430 return ret; 2430 return ret;
2431 } 2431 }
2432 else 2432 else
2433 IRDA_DEBUG(1, "%s(), found immediately !\n", 2433 IRDA_DEBUG(1, "%s(), found immediately !\n",
2434 __FUNCTION__); 2434 __func__);
2435 2435
2436 /* Tell IrLMP that we have been notified */ 2436 /* Tell IrLMP that we have been notified */
2437 irlmp_update_client(self->ckey, self->mask.word, 2437 irlmp_update_client(self->ckey, self->mask.word,
diff --git a/net/irda/discovery.c b/net/irda/discovery.c
index 80c33f408e3f..bfacef8b76f4 100644
--- a/net/irda/discovery.c
+++ b/net/irda/discovery.c
@@ -110,7 +110,7 @@ void irlmp_add_discovery_log(hashbin_t *cachelog, hashbin_t *log)
110{ 110{
111 discovery_t *discovery; 111 discovery_t *discovery;
112 112
113 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 113 IRDA_DEBUG(4, "%s()\n", __func__);
114 114
115 /* 115 /*
116 * If log is missing this means that IrLAP was unable to perform the 116 * If log is missing this means that IrLAP was unable to perform the
@@ -157,7 +157,7 @@ void irlmp_expire_discoveries(hashbin_t *log, __u32 saddr, int force)
157 int i = 0; /* How many we expired */ 157 int i = 0; /* How many we expired */
158 158
159 IRDA_ASSERT(log != NULL, return;); 159 IRDA_ASSERT(log != NULL, return;);
160 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 160 IRDA_DEBUG(4, "%s()\n", __func__);
161 161
162 spin_lock_irqsave(&log->hb_spinlock, flags); 162 spin_lock_irqsave(&log->hb_spinlock, flags);
163 163
diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c
index 6eef1f2a7553..018c92941aba 100644
--- a/net/irda/ircomm/ircomm_core.c
+++ b/net/irda/ircomm/ircomm_core.c
@@ -70,7 +70,7 @@ static int __init ircomm_init(void)
70{ 70{
71 ircomm = hashbin_new(HB_LOCK); 71 ircomm = hashbin_new(HB_LOCK);
72 if (ircomm == NULL) { 72 if (ircomm == NULL) {
73 IRDA_ERROR("%s(), can't allocate hashbin!\n", __FUNCTION__); 73 IRDA_ERROR("%s(), can't allocate hashbin!\n", __func__);
74 return -ENOMEM; 74 return -ENOMEM;
75 } 75 }
76 76
@@ -91,7 +91,7 @@ static int __init ircomm_init(void)
91 91
92static void __exit ircomm_cleanup(void) 92static void __exit ircomm_cleanup(void)
93{ 93{
94 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 94 IRDA_DEBUG(2, "%s()\n", __func__ );
95 95
96 hashbin_delete(ircomm, (FREE_FUNC) __ircomm_close); 96 hashbin_delete(ircomm, (FREE_FUNC) __ircomm_close);
97 97
@@ -111,7 +111,7 @@ struct ircomm_cb *ircomm_open(notify_t *notify, __u8 service_type, int line)
111 struct ircomm_cb *self = NULL; 111 struct ircomm_cb *self = NULL;
112 int ret; 112 int ret;
113 113
114 IRDA_DEBUG(2, "%s(), service_type=0x%02x\n", __FUNCTION__ , 114 IRDA_DEBUG(2, "%s(), service_type=0x%02x\n", __func__ ,
115 service_type); 115 service_type);
116 116
117 IRDA_ASSERT(ircomm != NULL, return NULL;); 117 IRDA_ASSERT(ircomm != NULL, return NULL;);
@@ -155,7 +155,7 @@ EXPORT_SYMBOL(ircomm_open);
155 */ 155 */
156static int __ircomm_close(struct ircomm_cb *self) 156static int __ircomm_close(struct ircomm_cb *self)
157{ 157{
158 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 158 IRDA_DEBUG(2, "%s()\n", __func__ );
159 159
160 /* Disconnect link if any */ 160 /* Disconnect link if any */
161 ircomm_do_event(self, IRCOMM_DISCONNECT_REQUEST, NULL, NULL); 161 ircomm_do_event(self, IRCOMM_DISCONNECT_REQUEST, NULL, NULL);
@@ -191,7 +191,7 @@ int ircomm_close(struct ircomm_cb *self)
191 IRDA_ASSERT(self != NULL, return -EIO;); 191 IRDA_ASSERT(self != NULL, return -EIO;);
192 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EIO;); 192 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EIO;);
193 193
194 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 194 IRDA_DEBUG(0, "%s()\n", __func__ );
195 195
196 entry = hashbin_remove(ircomm, self->line, NULL); 196 entry = hashbin_remove(ircomm, self->line, NULL);
197 197
@@ -216,7 +216,7 @@ int ircomm_connect_request(struct ircomm_cb *self, __u8 dlsap_sel,
216 struct ircomm_info info; 216 struct ircomm_info info;
217 int ret; 217 int ret;
218 218
219 IRDA_DEBUG(2 , "%s()\n", __FUNCTION__ ); 219 IRDA_DEBUG(2 , "%s()\n", __func__ );
220 220
221 IRDA_ASSERT(self != NULL, return -1;); 221 IRDA_ASSERT(self != NULL, return -1;);
222 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); 222 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
@@ -245,7 +245,7 @@ void ircomm_connect_indication(struct ircomm_cb *self, struct sk_buff *skb,
245{ 245{
246 int clen = 0; 246 int clen = 0;
247 247
248 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 248 IRDA_DEBUG(2, "%s()\n", __func__ );
249 249
250 /* Check if the packet contains data on the control channel */ 250 /* Check if the packet contains data on the control channel */
251 if (skb->len > 0) 251 if (skb->len > 0)
@@ -261,7 +261,7 @@ void ircomm_connect_indication(struct ircomm_cb *self, struct sk_buff *skb,
261 info->qos, info->max_data_size, 261 info->qos, info->max_data_size,
262 info->max_header_size, skb); 262 info->max_header_size, skb);
263 else { 263 else {
264 IRDA_DEBUG(0, "%s(), missing handler\n", __FUNCTION__ ); 264 IRDA_DEBUG(0, "%s(), missing handler\n", __func__ );
265 } 265 }
266} 266}
267 267
@@ -278,7 +278,7 @@ int ircomm_connect_response(struct ircomm_cb *self, struct sk_buff *userdata)
278 IRDA_ASSERT(self != NULL, return -1;); 278 IRDA_ASSERT(self != NULL, return -1;);
279 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); 279 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
280 280
281 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 281 IRDA_DEBUG(4, "%s()\n", __func__ );
282 282
283 ret = ircomm_do_event(self, IRCOMM_CONNECT_RESPONSE, userdata, NULL); 283 ret = ircomm_do_event(self, IRCOMM_CONNECT_RESPONSE, userdata, NULL);
284 284
@@ -296,7 +296,7 @@ EXPORT_SYMBOL(ircomm_connect_response);
296void ircomm_connect_confirm(struct ircomm_cb *self, struct sk_buff *skb, 296void ircomm_connect_confirm(struct ircomm_cb *self, struct sk_buff *skb,
297 struct ircomm_info *info) 297 struct ircomm_info *info)
298{ 298{
299 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 299 IRDA_DEBUG(4, "%s()\n", __func__ );
300 300
301 if (self->notify.connect_confirm ) 301 if (self->notify.connect_confirm )
302 self->notify.connect_confirm(self->notify.instance, 302 self->notify.connect_confirm(self->notify.instance,
@@ -304,7 +304,7 @@ void ircomm_connect_confirm(struct ircomm_cb *self, struct sk_buff *skb,
304 info->max_data_size, 304 info->max_data_size,
305 info->max_header_size, skb); 305 info->max_header_size, skb);
306 else { 306 else {
307 IRDA_DEBUG(0, "%s(), missing handler\n", __FUNCTION__ ); 307 IRDA_DEBUG(0, "%s(), missing handler\n", __func__ );
308 } 308 }
309} 309}
310 310
@@ -318,7 +318,7 @@ int ircomm_data_request(struct ircomm_cb *self, struct sk_buff *skb)
318{ 318{
319 int ret; 319 int ret;
320 320
321 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 321 IRDA_DEBUG(4, "%s()\n", __func__ );
322 322
323 IRDA_ASSERT(self != NULL, return -EFAULT;); 323 IRDA_ASSERT(self != NULL, return -EFAULT;);
324 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;); 324 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;);
@@ -339,14 +339,14 @@ EXPORT_SYMBOL(ircomm_data_request);
339 */ 339 */
340void ircomm_data_indication(struct ircomm_cb *self, struct sk_buff *skb) 340void ircomm_data_indication(struct ircomm_cb *self, struct sk_buff *skb)
341{ 341{
342 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 342 IRDA_DEBUG(4, "%s()\n", __func__ );
343 343
344 IRDA_ASSERT(skb->len > 0, return;); 344 IRDA_ASSERT(skb->len > 0, return;);
345 345
346 if (self->notify.data_indication) 346 if (self->notify.data_indication)
347 self->notify.data_indication(self->notify.instance, self, skb); 347 self->notify.data_indication(self->notify.instance, self, skb);
348 else { 348 else {
349 IRDA_DEBUG(0, "%s(), missing handler\n", __FUNCTION__ ); 349 IRDA_DEBUG(0, "%s(), missing handler\n", __func__ );
350 } 350 }
351} 351}
352 352
@@ -372,7 +372,7 @@ void ircomm_process_data(struct ircomm_cb *self, struct sk_buff *skb)
372 */ 372 */
373 if (unlikely(skb->len < (clen + 1))) { 373 if (unlikely(skb->len < (clen + 1))) {
374 IRDA_DEBUG(2, "%s() throwing away illegal frame\n", 374 IRDA_DEBUG(2, "%s() throwing away illegal frame\n",
375 __FUNCTION__ ); 375 __func__ );
376 return; 376 return;
377 } 377 }
378 378
@@ -391,7 +391,7 @@ void ircomm_process_data(struct ircomm_cb *self, struct sk_buff *skb)
391 ircomm_data_indication(self, skb); 391 ircomm_data_indication(self, skb);
392 else { 392 else {
393 IRDA_DEBUG(4, "%s(), data was control info only!\n", 393 IRDA_DEBUG(4, "%s(), data was control info only!\n",
394 __FUNCTION__ ); 394 __func__ );
395 } 395 }
396} 396}
397 397
@@ -405,7 +405,7 @@ int ircomm_control_request(struct ircomm_cb *self, struct sk_buff *skb)
405{ 405{
406 int ret; 406 int ret;
407 407
408 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 408 IRDA_DEBUG(2, "%s()\n", __func__ );
409 409
410 IRDA_ASSERT(self != NULL, return -EFAULT;); 410 IRDA_ASSERT(self != NULL, return -EFAULT;);
411 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;); 411 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;);
@@ -427,7 +427,7 @@ EXPORT_SYMBOL(ircomm_control_request);
427static void ircomm_control_indication(struct ircomm_cb *self, 427static void ircomm_control_indication(struct ircomm_cb *self,
428 struct sk_buff *skb, int clen) 428 struct sk_buff *skb, int clen)
429{ 429{
430 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 430 IRDA_DEBUG(2, "%s()\n", __func__ );
431 431
432 /* Use udata for delivering data on the control channel */ 432 /* Use udata for delivering data on the control channel */
433 if (self->notify.udata_indication) { 433 if (self->notify.udata_indication) {
@@ -448,7 +448,7 @@ static void ircomm_control_indication(struct ircomm_cb *self,
448 * see ircomm_tty_control_indication(). */ 448 * see ircomm_tty_control_indication(). */
449 dev_kfree_skb(ctrl_skb); 449 dev_kfree_skb(ctrl_skb);
450 } else { 450 } else {
451 IRDA_DEBUG(0, "%s(), missing handler\n", __FUNCTION__ ); 451 IRDA_DEBUG(0, "%s(), missing handler\n", __func__ );
452 } 452 }
453} 453}
454 454
@@ -463,7 +463,7 @@ int ircomm_disconnect_request(struct ircomm_cb *self, struct sk_buff *userdata)
463 struct ircomm_info info; 463 struct ircomm_info info;
464 int ret; 464 int ret;
465 465
466 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 466 IRDA_DEBUG(2, "%s()\n", __func__ );
467 467
468 IRDA_ASSERT(self != NULL, return -1;); 468 IRDA_ASSERT(self != NULL, return -1;);
469 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); 469 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
@@ -484,7 +484,7 @@ EXPORT_SYMBOL(ircomm_disconnect_request);
484void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb, 484void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb,
485 struct ircomm_info *info) 485 struct ircomm_info *info)
486{ 486{
487 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 487 IRDA_DEBUG(2, "%s()\n", __func__ );
488 488
489 IRDA_ASSERT(info != NULL, return;); 489 IRDA_ASSERT(info != NULL, return;);
490 490
@@ -492,7 +492,7 @@ void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb,
492 self->notify.disconnect_indication(self->notify.instance, self, 492 self->notify.disconnect_indication(self->notify.instance, self,
493 info->reason, skb); 493 info->reason, skb);
494 } else { 494 } else {
495 IRDA_DEBUG(0, "%s(), missing handler\n", __FUNCTION__ ); 495 IRDA_DEBUG(0, "%s(), missing handler\n", __func__ );
496 } 496 }
497} 497}
498 498
@@ -504,7 +504,7 @@ void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb,
504 */ 504 */
505void ircomm_flow_request(struct ircomm_cb *self, LOCAL_FLOW flow) 505void ircomm_flow_request(struct ircomm_cb *self, LOCAL_FLOW flow)
506{ 506{
507 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 507 IRDA_DEBUG(2, "%s()\n", __func__ );
508 508
509 IRDA_ASSERT(self != NULL, return;); 509 IRDA_ASSERT(self != NULL, return;);
510 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); 510 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
diff --git a/net/irda/ircomm/ircomm_event.c b/net/irda/ircomm/ircomm_event.c
index 8ba4e59ece16..c35b3ef5c2f0 100644
--- a/net/irda/ircomm/ircomm_event.c
+++ b/net/irda/ircomm/ircomm_event.c
@@ -108,7 +108,7 @@ static int ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event,
108 ircomm_connect_indication(self, skb, info); 108 ircomm_connect_indication(self, skb, info);
109 break; 109 break;
110 default: 110 default:
111 IRDA_DEBUG(4, "%s(), unknown event: %s\n", __FUNCTION__ , 111 IRDA_DEBUG(4, "%s(), unknown event: %s\n", __func__ ,
112 ircomm_event[event]); 112 ircomm_event[event]);
113 ret = -EINVAL; 113 ret = -EINVAL;
114 } 114 }
@@ -138,7 +138,7 @@ static int ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event,
138 ircomm_disconnect_indication(self, skb, info); 138 ircomm_disconnect_indication(self, skb, info);
139 break; 139 break;
140 default: 140 default:
141 IRDA_DEBUG(0, "%s(), unknown event: %s\n", __FUNCTION__ , 141 IRDA_DEBUG(0, "%s(), unknown event: %s\n", __func__ ,
142 ircomm_event[event]); 142 ircomm_event[event]);
143 ret = -EINVAL; 143 ret = -EINVAL;
144 } 144 }
@@ -171,7 +171,7 @@ static int ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event,
171 ircomm_disconnect_indication(self, skb, info); 171 ircomm_disconnect_indication(self, skb, info);
172 break; 172 break;
173 default: 173 default:
174 IRDA_DEBUG(0, "%s(), unknown event = %s\n", __FUNCTION__ , 174 IRDA_DEBUG(0, "%s(), unknown event = %s\n", __func__ ,
175 ircomm_event[event]); 175 ircomm_event[event]);
176 ret = -EINVAL; 176 ret = -EINVAL;
177 } 177 }
@@ -213,7 +213,7 @@ static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event,
213 ret = self->issue.disconnect_request(self, skb, info); 213 ret = self->issue.disconnect_request(self, skb, info);
214 break; 214 break;
215 default: 215 default:
216 IRDA_DEBUG(0, "%s(), unknown event = %s\n", __FUNCTION__ , 216 IRDA_DEBUG(0, "%s(), unknown event = %s\n", __func__ ,
217 ircomm_event[event]); 217 ircomm_event[event]);
218 ret = -EINVAL; 218 ret = -EINVAL;
219 } 219 }
@@ -229,7 +229,7 @@ static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event,
229int ircomm_do_event(struct ircomm_cb *self, IRCOMM_EVENT event, 229int ircomm_do_event(struct ircomm_cb *self, IRCOMM_EVENT event,
230 struct sk_buff *skb, struct ircomm_info *info) 230 struct sk_buff *skb, struct ircomm_info *info)
231{ 231{
232 IRDA_DEBUG(4, "%s: state=%s, event=%s\n", __FUNCTION__ , 232 IRDA_DEBUG(4, "%s: state=%s, event=%s\n", __func__ ,
233 ircomm_state[self->state], ircomm_event[event]); 233 ircomm_state[self->state], ircomm_event[event]);
234 234
235 return (*state[self->state])(self, event, skb, info); 235 return (*state[self->state])(self, event, skb, info);
@@ -245,6 +245,6 @@ void ircomm_next_state(struct ircomm_cb *self, IRCOMM_STATE state)
245{ 245{
246 self->state = state; 246 self->state = state;
247 247
248 IRDA_DEBUG(4, "%s: next state=%s, service type=%d\n", __FUNCTION__ , 248 IRDA_DEBUG(4, "%s: next state=%s, service type=%d\n", __func__ ,
249 ircomm_state[self->state], self->service_type); 249 ircomm_state[self->state], self->service_type);
250} 250}
diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c
index 55860ee4e39e..67c99d20857f 100644
--- a/net/irda/ircomm/ircomm_lmp.c
+++ b/net/irda/ircomm/ircomm_lmp.c
@@ -53,7 +53,7 @@ static int ircomm_lmp_connect_request(struct ircomm_cb *self,
53{ 53{
54 int ret = 0; 54 int ret = 0;
55 55
56 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 56 IRDA_DEBUG(0, "%s()\n", __func__ );
57 57
58 /* Don't forget to refcount it - should be NULL anyway */ 58 /* Don't forget to refcount it - should be NULL anyway */
59 if(userdata) 59 if(userdata)
@@ -76,7 +76,7 @@ static int ircomm_lmp_connect_response(struct ircomm_cb *self,
76 struct sk_buff *tx_skb; 76 struct sk_buff *tx_skb;
77 int ret; 77 int ret;
78 78
79 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 79 IRDA_DEBUG(0, "%s()\n", __func__ );
80 80
81 /* Any userdata supplied? */ 81 /* Any userdata supplied? */
82 if (userdata == NULL) { 82 if (userdata == NULL) {
@@ -111,7 +111,7 @@ static int ircomm_lmp_disconnect_request(struct ircomm_cb *self,
111 struct sk_buff *tx_skb; 111 struct sk_buff *tx_skb;
112 int ret; 112 int ret;
113 113
114 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 114 IRDA_DEBUG(0, "%s()\n", __func__ );
115 115
116 if (!userdata) { 116 if (!userdata) {
117 tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC); 117 tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
@@ -148,13 +148,13 @@ static void ircomm_lmp_flow_control(struct sk_buff *skb)
148 148
149 cb = (struct irda_skb_cb *) skb->cb; 149 cb = (struct irda_skb_cb *) skb->cb;
150 150
151 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 151 IRDA_DEBUG(2, "%s()\n", __func__ );
152 152
153 line = cb->line; 153 line = cb->line;
154 154
155 self = (struct ircomm_cb *) hashbin_lock_find(ircomm, line, NULL); 155 self = (struct ircomm_cb *) hashbin_lock_find(ircomm, line, NULL);
156 if (!self) { 156 if (!self) {
157 IRDA_DEBUG(2, "%s(), didn't find myself\n", __FUNCTION__ ); 157 IRDA_DEBUG(2, "%s(), didn't find myself\n", __func__ );
158 return; 158 return;
159 } 159 }
160 160
@@ -164,7 +164,7 @@ static void ircomm_lmp_flow_control(struct sk_buff *skb)
164 self->pkt_count--; 164 self->pkt_count--;
165 165
166 if ((self->pkt_count < 2) && (self->flow_status == FLOW_STOP)) { 166 if ((self->pkt_count < 2) && (self->flow_status == FLOW_STOP)) {
167 IRDA_DEBUG(2, "%s(), asking TTY to start again!\n", __FUNCTION__ ); 167 IRDA_DEBUG(2, "%s(), asking TTY to start again!\n", __func__ );
168 self->flow_status = FLOW_START; 168 self->flow_status = FLOW_START;
169 if (self->notify.flow_indication) 169 if (self->notify.flow_indication)
170 self->notify.flow_indication(self->notify.instance, 170 self->notify.flow_indication(self->notify.instance,
@@ -191,7 +191,7 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self,
191 191
192 cb->line = self->line; 192 cb->line = self->line;
193 193
194 IRDA_DEBUG(4, "%s(), sending frame\n", __FUNCTION__ ); 194 IRDA_DEBUG(4, "%s(), sending frame\n", __func__ );
195 195
196 /* Don't forget to refcount it - see ircomm_tty_do_softint() */ 196 /* Don't forget to refcount it - see ircomm_tty_do_softint() */
197 skb_get(skb); 197 skb_get(skb);
@@ -199,7 +199,7 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self,
199 skb->destructor = ircomm_lmp_flow_control; 199 skb->destructor = ircomm_lmp_flow_control;
200 200
201 if ((self->pkt_count++ > 7) && (self->flow_status == FLOW_START)) { 201 if ((self->pkt_count++ > 7) && (self->flow_status == FLOW_START)) {
202 IRDA_DEBUG(2, "%s(), asking TTY to slow down!\n", __FUNCTION__ ); 202 IRDA_DEBUG(2, "%s(), asking TTY to slow down!\n", __func__ );
203 self->flow_status = FLOW_STOP; 203 self->flow_status = FLOW_STOP;
204 if (self->notify.flow_indication) 204 if (self->notify.flow_indication)
205 self->notify.flow_indication(self->notify.instance, 205 self->notify.flow_indication(self->notify.instance,
@@ -207,7 +207,7 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self,
207 } 207 }
208 ret = irlmp_data_request(self->lsap, skb); 208 ret = irlmp_data_request(self->lsap, skb);
209 if (ret) { 209 if (ret) {
210 IRDA_ERROR("%s(), failed\n", __FUNCTION__); 210 IRDA_ERROR("%s(), failed\n", __func__);
211 /* irlmp_data_request already free the packet */ 211 /* irlmp_data_request already free the packet */
212 } 212 }
213 213
@@ -225,7 +225,7 @@ static int ircomm_lmp_data_indication(void *instance, void *sap,
225{ 225{
226 struct ircomm_cb *self = (struct ircomm_cb *) instance; 226 struct ircomm_cb *self = (struct ircomm_cb *) instance;
227 227
228 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 228 IRDA_DEBUG(4, "%s()\n", __func__ );
229 229
230 IRDA_ASSERT(self != NULL, return -1;); 230 IRDA_ASSERT(self != NULL, return -1;);
231 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); 231 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
@@ -255,7 +255,7 @@ static void ircomm_lmp_connect_confirm(void *instance, void *sap,
255 struct ircomm_cb *self = (struct ircomm_cb *) instance; 255 struct ircomm_cb *self = (struct ircomm_cb *) instance;
256 struct ircomm_info info; 256 struct ircomm_info info;
257 257
258 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 258 IRDA_DEBUG(0, "%s()\n", __func__ );
259 259
260 IRDA_ASSERT(self != NULL, return;); 260 IRDA_ASSERT(self != NULL, return;);
261 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); 261 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
@@ -288,7 +288,7 @@ static void ircomm_lmp_connect_indication(void *instance, void *sap,
288 struct ircomm_cb *self = (struct ircomm_cb *)instance; 288 struct ircomm_cb *self = (struct ircomm_cb *)instance;
289 struct ircomm_info info; 289 struct ircomm_info info;
290 290
291 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 291 IRDA_DEBUG(0, "%s()\n", __func__ );
292 292
293 IRDA_ASSERT(self != NULL, return;); 293 IRDA_ASSERT(self != NULL, return;);
294 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); 294 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
@@ -318,7 +318,7 @@ static void ircomm_lmp_disconnect_indication(void *instance, void *sap,
318 struct ircomm_cb *self = (struct ircomm_cb *) instance; 318 struct ircomm_cb *self = (struct ircomm_cb *) instance;
319 struct ircomm_info info; 319 struct ircomm_info info;
320 320
321 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 321 IRDA_DEBUG(0, "%s()\n", __func__ );
322 322
323 IRDA_ASSERT(self != NULL, return;); 323 IRDA_ASSERT(self != NULL, return;);
324 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); 324 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
@@ -341,7 +341,7 @@ int ircomm_open_lsap(struct ircomm_cb *self)
341{ 341{
342 notify_t notify; 342 notify_t notify;
343 343
344 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 344 IRDA_DEBUG(0, "%s()\n", __func__ );
345 345
346 /* Register callbacks */ 346 /* Register callbacks */
347 irda_notify_init(&notify); 347 irda_notify_init(&notify);
@@ -354,7 +354,7 @@ int ircomm_open_lsap(struct ircomm_cb *self)
354 354
355 self->lsap = irlmp_open_lsap(LSAP_ANY, &notify, 0); 355 self->lsap = irlmp_open_lsap(LSAP_ANY, &notify, 0);
356 if (!self->lsap) { 356 if (!self->lsap) {
357 IRDA_DEBUG(0,"%sfailed to allocate tsap\n", __FUNCTION__ ); 357 IRDA_DEBUG(0,"%sfailed to allocate tsap\n", __func__ );
358 return -1; 358 return -1;
359 } 359 }
360 self->slsap_sel = self->lsap->slsap_sel; 360 self->slsap_sel = self->lsap->slsap_sel;
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c
index 598dcbe4a501..d57aefd9fe77 100644
--- a/net/irda/ircomm/ircomm_param.c
+++ b/net/irda/ircomm/ircomm_param.c
@@ -103,7 +103,7 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
103 struct sk_buff *skb; 103 struct sk_buff *skb;
104 int count; 104 int count;
105 105
106 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 106 IRDA_DEBUG(2, "%s()\n", __func__ );
107 107
108 IRDA_ASSERT(self != NULL, return -1;); 108 IRDA_ASSERT(self != NULL, return -1;);
109 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); 109 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
@@ -136,7 +136,7 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
136 count = irda_param_insert(self, pi, skb_tail_pointer(skb), 136 count = irda_param_insert(self, pi, skb_tail_pointer(skb),
137 skb_tailroom(skb), &ircomm_param_info); 137 skb_tailroom(skb), &ircomm_param_info);
138 if (count < 0) { 138 if (count < 0) {
139 IRDA_WARNING("%s(), no room for parameter!\n", __FUNCTION__); 139 IRDA_WARNING("%s(), no room for parameter!\n", __func__);
140 spin_unlock_irqrestore(&self->spinlock, flags); 140 spin_unlock_irqrestore(&self->spinlock, flags);
141 return -1; 141 return -1;
142 } 142 }
@@ -144,7 +144,7 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
144 144
145 spin_unlock_irqrestore(&self->spinlock, flags); 145 spin_unlock_irqrestore(&self->spinlock, flags);
146 146
147 IRDA_DEBUG(2, "%s(), skb->len=%d\n", __FUNCTION__ , skb->len); 147 IRDA_DEBUG(2, "%s(), skb->len=%d\n", __func__ , skb->len);
148 148
149 if (flush) { 149 if (flush) {
150 /* ircomm_tty_do_softint will take care of the rest */ 150 /* ircomm_tty_do_softint will take care of the rest */
@@ -179,10 +179,10 @@ static int ircomm_param_service_type(void *instance, irda_param_t *param,
179 service_type &= self->service_type; 179 service_type &= self->service_type;
180 if (!service_type) { 180 if (!service_type) {
181 IRDA_DEBUG(2, 181 IRDA_DEBUG(2,
182 "%s(), No common service type to use!\n", __FUNCTION__ ); 182 "%s(), No common service type to use!\n", __func__ );
183 return -1; 183 return -1;
184 } 184 }
185 IRDA_DEBUG(0, "%s(), services in common=%02x\n", __FUNCTION__ , 185 IRDA_DEBUG(0, "%s(), services in common=%02x\n", __func__ ,
186 service_type); 186 service_type);
187 187
188 /* 188 /*
@@ -197,7 +197,7 @@ static int ircomm_param_service_type(void *instance, irda_param_t *param,
197 else if (service_type & IRCOMM_3_WIRE_RAW) 197 else if (service_type & IRCOMM_3_WIRE_RAW)
198 self->settings.service_type = IRCOMM_3_WIRE_RAW; 198 self->settings.service_type = IRCOMM_3_WIRE_RAW;
199 199
200 IRDA_DEBUG(0, "%s(), resulting service type=0x%02x\n", __FUNCTION__ , 200 IRDA_DEBUG(0, "%s(), resulting service type=0x%02x\n", __func__ ,
201 self->settings.service_type); 201 self->settings.service_type);
202 202
203 /* 203 /*
@@ -240,7 +240,7 @@ static int ircomm_param_port_type(void *instance, irda_param_t *param, int get)
240 else { 240 else {
241 self->settings.port_type = (__u8) param->pv.i; 241 self->settings.port_type = (__u8) param->pv.i;
242 242
243 IRDA_DEBUG(0, "%s(), port type=%d\n", __FUNCTION__ , 243 IRDA_DEBUG(0, "%s(), port type=%d\n", __func__ ,
244 self->settings.port_type); 244 self->settings.port_type);
245 } 245 }
246 return 0; 246 return 0;
@@ -260,9 +260,9 @@ static int ircomm_param_port_name(void *instance, irda_param_t *param, int get)
260 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); 260 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
261 261
262 if (get) { 262 if (get) {
263 IRDA_DEBUG(0, "%s(), not imp!\n", __FUNCTION__ ); 263 IRDA_DEBUG(0, "%s(), not imp!\n", __func__ );
264 } else { 264 } else {
265 IRDA_DEBUG(0, "%s(), port-name=%s\n", __FUNCTION__ , param->pv.c); 265 IRDA_DEBUG(0, "%s(), port-name=%s\n", __func__ , param->pv.c);
266 strncpy(self->settings.port_name, param->pv.c, 32); 266 strncpy(self->settings.port_name, param->pv.c, 32);
267 } 267 }
268 268
@@ -287,7 +287,7 @@ static int ircomm_param_data_rate(void *instance, irda_param_t *param, int get)
287 else 287 else
288 self->settings.data_rate = param->pv.i; 288 self->settings.data_rate = param->pv.i;
289 289
290 IRDA_DEBUG(2, "%s(), data rate = %d\n", __FUNCTION__ , param->pv.i); 290 IRDA_DEBUG(2, "%s(), data rate = %d\n", __func__ , param->pv.i);
291 291
292 return 0; 292 return 0;
293} 293}
@@ -333,7 +333,7 @@ static int ircomm_param_flow_control(void *instance, irda_param_t *param,
333 else 333 else
334 self->settings.flow_control = (__u8) param->pv.i; 334 self->settings.flow_control = (__u8) param->pv.i;
335 335
336 IRDA_DEBUG(1, "%s(), flow control = 0x%02x\n", __FUNCTION__ , (__u8) param->pv.i); 336 IRDA_DEBUG(1, "%s(), flow control = 0x%02x\n", __func__ , (__u8) param->pv.i);
337 337
338 return 0; 338 return 0;
339} 339}
@@ -359,7 +359,7 @@ static int ircomm_param_xon_xoff(void *instance, irda_param_t *param, int get)
359 self->settings.xonxoff[1] = (__u16) param->pv.i >> 8; 359 self->settings.xonxoff[1] = (__u16) param->pv.i >> 8;
360 } 360 }
361 361
362 IRDA_DEBUG(0, "%s(), XON/XOFF = 0x%02x,0x%02x\n", __FUNCTION__ , 362 IRDA_DEBUG(0, "%s(), XON/XOFF = 0x%02x,0x%02x\n", __func__ ,
363 param->pv.i & 0xff, param->pv.i >> 8); 363 param->pv.i & 0xff, param->pv.i >> 8);
364 364
365 return 0; 365 return 0;
@@ -386,7 +386,7 @@ static int ircomm_param_enq_ack(void *instance, irda_param_t *param, int get)
386 self->settings.enqack[1] = (__u16) param->pv.i >> 8; 386 self->settings.enqack[1] = (__u16) param->pv.i >> 8;
387 } 387 }
388 388
389 IRDA_DEBUG(0, "%s(), ENQ/ACK = 0x%02x,0x%02x\n", __FUNCTION__ , 389 IRDA_DEBUG(0, "%s(), ENQ/ACK = 0x%02x,0x%02x\n", __func__ ,
390 param->pv.i & 0xff, param->pv.i >> 8); 390 param->pv.i & 0xff, param->pv.i >> 8);
391 391
392 return 0; 392 return 0;
@@ -401,7 +401,7 @@ static int ircomm_param_enq_ack(void *instance, irda_param_t *param, int get)
401static int ircomm_param_line_status(void *instance, irda_param_t *param, 401static int ircomm_param_line_status(void *instance, irda_param_t *param,
402 int get) 402 int get)
403{ 403{
404 IRDA_DEBUG(2, "%s(), not impl.\n", __FUNCTION__ ); 404 IRDA_DEBUG(2, "%s(), not impl.\n", __func__ );
405 405
406 return 0; 406 return 0;
407} 407}
@@ -462,7 +462,7 @@ static int ircomm_param_dce(void *instance, irda_param_t *param, int get)
462 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; 462 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
463 __u8 dce; 463 __u8 dce;
464 464
465 IRDA_DEBUG(1, "%s(), dce = 0x%02x\n", __FUNCTION__ , (__u8) param->pv.i); 465 IRDA_DEBUG(1, "%s(), dce = 0x%02x\n", __func__ , (__u8) param->pv.i);
466 466
467 dce = (__u8) param->pv.i; 467 dce = (__u8) param->pv.i;
468 468
@@ -474,7 +474,7 @@ static int ircomm_param_dce(void *instance, irda_param_t *param, int get)
474 /* Check if any of the settings have changed */ 474 /* Check if any of the settings have changed */
475 if (dce & 0x0f) { 475 if (dce & 0x0f) {
476 if (dce & IRCOMM_DELTA_CTS) { 476 if (dce & IRCOMM_DELTA_CTS) {
477 IRDA_DEBUG(2, "%s(), CTS \n", __FUNCTION__ ); 477 IRDA_DEBUG(2, "%s(), CTS \n", __func__ );
478 } 478 }
479 } 479 }
480 480
diff --git a/net/irda/ircomm/ircomm_ttp.c b/net/irda/ircomm/ircomm_ttp.c
index 712eafd0cc76..6e6509f22f60 100644
--- a/net/irda/ircomm/ircomm_ttp.c
+++ b/net/irda/ircomm/ircomm_ttp.c
@@ -78,7 +78,7 @@ int ircomm_open_tsap(struct ircomm_cb *self)
78{ 78{
79 notify_t notify; 79 notify_t notify;
80 80
81 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 81 IRDA_DEBUG(4, "%s()\n", __func__ );
82 82
83 /* Register callbacks */ 83 /* Register callbacks */
84 irda_notify_init(&notify); 84 irda_notify_init(&notify);
@@ -93,7 +93,7 @@ int ircomm_open_tsap(struct ircomm_cb *self)
93 self->tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, 93 self->tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT,
94 &notify); 94 &notify);
95 if (!self->tsap) { 95 if (!self->tsap) {
96 IRDA_DEBUG(0, "%sfailed to allocate tsap\n", __FUNCTION__ ); 96 IRDA_DEBUG(0, "%sfailed to allocate tsap\n", __func__ );
97 return -1; 97 return -1;
98 } 98 }
99 self->slsap_sel = self->tsap->stsap_sel; 99 self->slsap_sel = self->tsap->stsap_sel;
@@ -121,7 +121,7 @@ static int ircomm_ttp_connect_request(struct ircomm_cb *self,
121{ 121{
122 int ret = 0; 122 int ret = 0;
123 123
124 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 124 IRDA_DEBUG(4, "%s()\n", __func__ );
125 125
126 /* Don't forget to refcount it - should be NULL anyway */ 126 /* Don't forget to refcount it - should be NULL anyway */
127 if(userdata) 127 if(userdata)
@@ -145,7 +145,7 @@ static int ircomm_ttp_connect_response(struct ircomm_cb *self,
145{ 145{
146 int ret; 146 int ret;
147 147
148 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 148 IRDA_DEBUG(4, "%s()\n", __func__ );
149 149
150 /* Don't forget to refcount it - should be NULL anyway */ 150 /* Don't forget to refcount it - should be NULL anyway */
151 if(userdata) 151 if(userdata)
@@ -173,7 +173,7 @@ static int ircomm_ttp_data_request(struct ircomm_cb *self,
173 173
174 IRDA_ASSERT(skb != NULL, return -1;); 174 IRDA_ASSERT(skb != NULL, return -1;);
175 175
176 IRDA_DEBUG(2, "%s(), clen=%d\n", __FUNCTION__ , clen); 176 IRDA_DEBUG(2, "%s(), clen=%d\n", __func__ , clen);
177 177
178 /* 178 /*
179 * Insert clen field, currently we either send data only, or control 179 * Insert clen field, currently we either send data only, or control
@@ -190,7 +190,7 @@ static int ircomm_ttp_data_request(struct ircomm_cb *self,
190 190
191 ret = irttp_data_request(self->tsap, skb); 191 ret = irttp_data_request(self->tsap, skb);
192 if (ret) { 192 if (ret) {
193 IRDA_ERROR("%s(), failed\n", __FUNCTION__); 193 IRDA_ERROR("%s(), failed\n", __func__);
194 /* irttp_data_request already free the packet */ 194 /* irttp_data_request already free the packet */
195 } 195 }
196 196
@@ -208,7 +208,7 @@ static int ircomm_ttp_data_indication(void *instance, void *sap,
208{ 208{
209 struct ircomm_cb *self = (struct ircomm_cb *) instance; 209 struct ircomm_cb *self = (struct ircomm_cb *) instance;
210 210
211 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 211 IRDA_DEBUG(4, "%s()\n", __func__ );
212 212
213 IRDA_ASSERT(self != NULL, return -1;); 213 IRDA_ASSERT(self != NULL, return -1;);
214 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); 214 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
@@ -231,7 +231,7 @@ static void ircomm_ttp_connect_confirm(void *instance, void *sap,
231 struct ircomm_cb *self = (struct ircomm_cb *) instance; 231 struct ircomm_cb *self = (struct ircomm_cb *) instance;
232 struct ircomm_info info; 232 struct ircomm_info info;
233 233
234 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 234 IRDA_DEBUG(4, "%s()\n", __func__ );
235 235
236 IRDA_ASSERT(self != NULL, return;); 236 IRDA_ASSERT(self != NULL, return;);
237 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); 237 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
@@ -240,7 +240,7 @@ static void ircomm_ttp_connect_confirm(void *instance, void *sap,
240 240
241 if (max_sdu_size != TTP_SAR_DISABLE) { 241 if (max_sdu_size != TTP_SAR_DISABLE) {
242 IRDA_ERROR("%s(), SAR not allowed for IrCOMM!\n", 242 IRDA_ERROR("%s(), SAR not allowed for IrCOMM!\n",
243 __FUNCTION__); 243 __func__);
244 goto out; 244 goto out;
245 } 245 }
246 246
@@ -272,7 +272,7 @@ static void ircomm_ttp_connect_indication(void *instance, void *sap,
272 struct ircomm_cb *self = (struct ircomm_cb *)instance; 272 struct ircomm_cb *self = (struct ircomm_cb *)instance;
273 struct ircomm_info info; 273 struct ircomm_info info;
274 274
275 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 275 IRDA_DEBUG(4, "%s()\n", __func__ );
276 276
277 IRDA_ASSERT(self != NULL, return;); 277 IRDA_ASSERT(self != NULL, return;);
278 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); 278 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
@@ -281,7 +281,7 @@ static void ircomm_ttp_connect_indication(void *instance, void *sap,
281 281
282 if (max_sdu_size != TTP_SAR_DISABLE) { 282 if (max_sdu_size != TTP_SAR_DISABLE) {
283 IRDA_ERROR("%s(), SAR not allowed for IrCOMM!\n", 283 IRDA_ERROR("%s(), SAR not allowed for IrCOMM!\n",
284 __FUNCTION__); 284 __func__);
285 goto out; 285 goto out;
286 } 286 }
287 287
@@ -331,7 +331,7 @@ static void ircomm_ttp_disconnect_indication(void *instance, void *sap,
331 struct ircomm_cb *self = (struct ircomm_cb *) instance; 331 struct ircomm_cb *self = (struct ircomm_cb *) instance;
332 struct ircomm_info info; 332 struct ircomm_info info;
333 333
334 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 334 IRDA_DEBUG(2, "%s()\n", __func__ );
335 335
336 IRDA_ASSERT(self != NULL, return;); 336 IRDA_ASSERT(self != NULL, return;);
337 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); 337 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
@@ -356,7 +356,7 @@ static void ircomm_ttp_flow_indication(void *instance, void *sap,
356{ 356{
357 struct ircomm_cb *self = (struct ircomm_cb *) instance; 357 struct ircomm_cb *self = (struct ircomm_cb *) instance;
358 358
359 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 359 IRDA_DEBUG(4, "%s()\n", __func__ );
360 360
361 IRDA_ASSERT(self != NULL, return;); 361 IRDA_ASSERT(self != NULL, return;);
362 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); 362 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index be627e1f04d8..d2620410cb0a 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -115,7 +115,7 @@ static int __init ircomm_tty_init(void)
115 return -ENOMEM; 115 return -ENOMEM;
116 ircomm_tty = hashbin_new(HB_LOCK); 116 ircomm_tty = hashbin_new(HB_LOCK);
117 if (ircomm_tty == NULL) { 117 if (ircomm_tty == NULL) {
118 IRDA_ERROR("%s(), can't allocate hashbin!\n", __FUNCTION__); 118 IRDA_ERROR("%s(), can't allocate hashbin!\n", __func__);
119 put_tty_driver(driver); 119 put_tty_driver(driver);
120 return -ENOMEM; 120 return -ENOMEM;
121 } 121 }
@@ -133,7 +133,7 @@ static int __init ircomm_tty_init(void)
133 tty_set_operations(driver, &ops); 133 tty_set_operations(driver, &ops);
134 if (tty_register_driver(driver)) { 134 if (tty_register_driver(driver)) {
135 IRDA_ERROR("%s(): Couldn't register serial driver\n", 135 IRDA_ERROR("%s(): Couldn't register serial driver\n",
136 __FUNCTION__); 136 __func__);
137 put_tty_driver(driver); 137 put_tty_driver(driver);
138 return -1; 138 return -1;
139 } 139 }
@@ -142,7 +142,7 @@ static int __init ircomm_tty_init(void)
142 142
143static void __exit __ircomm_tty_cleanup(struct ircomm_tty_cb *self) 143static void __exit __ircomm_tty_cleanup(struct ircomm_tty_cb *self)
144{ 144{
145 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 145 IRDA_DEBUG(0, "%s()\n", __func__ );
146 146
147 IRDA_ASSERT(self != NULL, return;); 147 IRDA_ASSERT(self != NULL, return;);
148 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 148 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -163,12 +163,12 @@ static void __exit ircomm_tty_cleanup(void)
163{ 163{
164 int ret; 164 int ret;
165 165
166 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 166 IRDA_DEBUG(4, "%s()\n", __func__ );
167 167
168 ret = tty_unregister_driver(driver); 168 ret = tty_unregister_driver(driver);
169 if (ret) { 169 if (ret) {
170 IRDA_ERROR("%s(), failed to unregister driver\n", 170 IRDA_ERROR("%s(), failed to unregister driver\n",
171 __FUNCTION__); 171 __func__);
172 return; 172 return;
173 } 173 }
174 174
@@ -187,14 +187,14 @@ static int ircomm_tty_startup(struct ircomm_tty_cb *self)
187 notify_t notify; 187 notify_t notify;
188 int ret = -ENODEV; 188 int ret = -ENODEV;
189 189
190 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 190 IRDA_DEBUG(2, "%s()\n", __func__ );
191 191
192 IRDA_ASSERT(self != NULL, return -1;); 192 IRDA_ASSERT(self != NULL, return -1;);
193 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); 193 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
194 194
195 /* Check if already open */ 195 /* Check if already open */
196 if (test_and_set_bit(ASYNC_B_INITIALIZED, &self->flags)) { 196 if (test_and_set_bit(ASYNC_B_INITIALIZED, &self->flags)) {
197 IRDA_DEBUG(2, "%s(), already open so break out!\n", __FUNCTION__ ); 197 IRDA_DEBUG(2, "%s(), already open so break out!\n", __func__ );
198 return 0; 198 return 0;
199 } 199 }
200 200
@@ -224,7 +224,7 @@ static int ircomm_tty_startup(struct ircomm_tty_cb *self)
224 /* Connect IrCOMM link with remote device */ 224 /* Connect IrCOMM link with remote device */
225 ret = ircomm_tty_attach_cable(self); 225 ret = ircomm_tty_attach_cable(self);
226 if (ret < 0) { 226 if (ret < 0) {
227 IRDA_ERROR("%s(), error attaching cable!\n", __FUNCTION__); 227 IRDA_ERROR("%s(), error attaching cable!\n", __func__);
228 goto err; 228 goto err;
229 } 229 }
230 230
@@ -249,7 +249,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
249 unsigned long flags; 249 unsigned long flags;
250 struct tty_struct *tty; 250 struct tty_struct *tty;
251 251
252 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 252 IRDA_DEBUG(2, "%s()\n", __func__ );
253 253
254 tty = self->tty; 254 tty = self->tty;
255 255
@@ -260,12 +260,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
260 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ 260 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
261 /* nonblock mode is set or port is not enabled */ 261 /* nonblock mode is set or port is not enabled */
262 self->flags |= ASYNC_NORMAL_ACTIVE; 262 self->flags |= ASYNC_NORMAL_ACTIVE;
263 IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __FUNCTION__ ); 263 IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ );
264 return 0; 264 return 0;
265 } 265 }
266 266
267 if (tty->termios->c_cflag & CLOCAL) { 267 if (tty->termios->c_cflag & CLOCAL) {
268 IRDA_DEBUG(1, "%s(), doing CLOCAL!\n", __FUNCTION__ ); 268 IRDA_DEBUG(1, "%s(), doing CLOCAL!\n", __func__ );
269 do_clocal = 1; 269 do_clocal = 1;
270 } 270 }
271 271
@@ -368,7 +368,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
368 unsigned long flags; 368 unsigned long flags;
369 int ret; 369 int ret;
370 370
371 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 371 IRDA_DEBUG(2, "%s()\n", __func__ );
372 372
373 line = tty->index; 373 line = tty->index;
374 if ((line < 0) || (line >= IRCOMM_TTY_PORTS)) { 374 if ((line < 0) || (line >= IRCOMM_TTY_PORTS)) {
@@ -381,7 +381,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
381 /* No, so make new instance */ 381 /* No, so make new instance */
382 self = kzalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL); 382 self = kzalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL);
383 if (self == NULL) { 383 if (self == NULL) {
384 IRDA_ERROR("%s(), kmalloc failed!\n", __FUNCTION__); 384 IRDA_ERROR("%s(), kmalloc failed!\n", __func__);
385 return -ENOMEM; 385 return -ENOMEM;
386 } 386 }
387 387
@@ -420,7 +420,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
420 self->tty = tty; 420 self->tty = tty;
421 spin_unlock_irqrestore(&self->spinlock, flags); 421 spin_unlock_irqrestore(&self->spinlock, flags);
422 422
423 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __FUNCTION__ , tty->driver->name, 423 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
424 self->line, self->open_count); 424 self->line, self->open_count);
425 425
426 /* Not really used by us, but lets do it anyway */ 426 /* Not really used by us, but lets do it anyway */
@@ -442,7 +442,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
442 442
443 if (wait_event_interruptible(self->close_wait, !test_bit(ASYNC_B_CLOSING, &self->flags))) { 443 if (wait_event_interruptible(self->close_wait, !test_bit(ASYNC_B_CLOSING, &self->flags))) {
444 IRDA_WARNING("%s - got signal while blocking on ASYNC_CLOSING!\n", 444 IRDA_WARNING("%s - got signal while blocking on ASYNC_CLOSING!\n",
445 __FUNCTION__); 445 __func__);
446 return -ERESTARTSYS; 446 return -ERESTARTSYS;
447 } 447 }
448 448
@@ -460,9 +460,9 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
460 self->settings.service_type = IRCOMM_9_WIRE; /* 9 wire as default */ 460 self->settings.service_type = IRCOMM_9_WIRE; /* 9 wire as default */
461 /* Jan Kiszka -> add DSR/RI -> Conform to IrCOMM spec */ 461 /* Jan Kiszka -> add DSR/RI -> Conform to IrCOMM spec */
462 self->settings.dce = IRCOMM_CTS | IRCOMM_CD | IRCOMM_DSR | IRCOMM_RI; /* Default line settings */ 462 self->settings.dce = IRCOMM_CTS | IRCOMM_CD | IRCOMM_DSR | IRCOMM_RI; /* Default line settings */
463 IRDA_DEBUG(2, "%s(), IrCOMM device\n", __FUNCTION__ ); 463 IRDA_DEBUG(2, "%s(), IrCOMM device\n", __func__ );
464 } else { 464 } else {
465 IRDA_DEBUG(2, "%s(), IrLPT device\n", __FUNCTION__ ); 465 IRDA_DEBUG(2, "%s(), IrLPT device\n", __func__ );
466 self->service_type = IRCOMM_3_WIRE_RAW; 466 self->service_type = IRCOMM_3_WIRE_RAW;
467 self->settings.service_type = IRCOMM_3_WIRE_RAW; /* Default */ 467 self->settings.service_type = IRCOMM_3_WIRE_RAW; /* Default */
468 } 468 }
@@ -474,7 +474,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
474 ret = ircomm_tty_block_til_ready(self, filp); 474 ret = ircomm_tty_block_til_ready(self, filp);
475 if (ret) { 475 if (ret) {
476 IRDA_DEBUG(2, 476 IRDA_DEBUG(2,
477 "%s(), returning after block_til_ready with %d\n", __FUNCTION__ , 477 "%s(), returning after block_til_ready with %d\n", __func__ ,
478 ret); 478 ret);
479 479
480 return ret; 480 return ret;
@@ -493,7 +493,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
493 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; 493 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
494 unsigned long flags; 494 unsigned long flags;
495 495
496 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 496 IRDA_DEBUG(0, "%s()\n", __func__ );
497 497
498 if (!tty) 498 if (!tty)
499 return; 499 return;
@@ -506,7 +506,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
506 if (tty_hung_up_p(filp)) { 506 if (tty_hung_up_p(filp)) {
507 spin_unlock_irqrestore(&self->spinlock, flags); 507 spin_unlock_irqrestore(&self->spinlock, flags);
508 508
509 IRDA_DEBUG(0, "%s(), returning 1\n", __FUNCTION__ ); 509 IRDA_DEBUG(0, "%s(), returning 1\n", __func__ );
510 return; 510 return;
511 } 511 }
512 512
@@ -519,20 +519,20 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
519 * serial port won't be shutdown. 519 * serial port won't be shutdown.
520 */ 520 */
521 IRDA_DEBUG(0, "%s(), bad serial port count; " 521 IRDA_DEBUG(0, "%s(), bad serial port count; "
522 "tty->count is 1, state->count is %d\n", __FUNCTION__ , 522 "tty->count is 1, state->count is %d\n", __func__ ,
523 self->open_count); 523 self->open_count);
524 self->open_count = 1; 524 self->open_count = 1;
525 } 525 }
526 526
527 if (--self->open_count < 0) { 527 if (--self->open_count < 0) {
528 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n", 528 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
529 __FUNCTION__, self->line, self->open_count); 529 __func__, self->line, self->open_count);
530 self->open_count = 0; 530 self->open_count = 0;
531 } 531 }
532 if (self->open_count) { 532 if (self->open_count) {
533 spin_unlock_irqrestore(&self->spinlock, flags); 533 spin_unlock_irqrestore(&self->spinlock, flags);
534 534
535 IRDA_DEBUG(0, "%s(), open count > 0\n", __FUNCTION__ ); 535 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
536 return; 536 return;
537 } 537 }
538 538
@@ -608,7 +608,7 @@ static void ircomm_tty_do_softint(struct work_struct *work)
608 unsigned long flags; 608 unsigned long flags;
609 struct sk_buff *skb, *ctrl_skb; 609 struct sk_buff *skb, *ctrl_skb;
610 610
611 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 611 IRDA_DEBUG(2, "%s()\n", __func__ );
612 612
613 if (!self || self->magic != IRCOMM_TTY_MAGIC) 613 if (!self || self->magic != IRCOMM_TTY_MAGIC)
614 return; 614 return;
@@ -678,7 +678,7 @@ static int ircomm_tty_write(struct tty_struct *tty,
678 int len = 0; 678 int len = 0;
679 int size; 679 int size;
680 680
681 IRDA_DEBUG(2, "%s(), count=%d, hw_stopped=%d\n", __FUNCTION__ , count, 681 IRDA_DEBUG(2, "%s(), count=%d, hw_stopped=%d\n", __func__ , count,
682 tty->hw_stopped); 682 tty->hw_stopped);
683 683
684 IRDA_ASSERT(self != NULL, return -1;); 684 IRDA_ASSERT(self != NULL, return -1;);
@@ -701,7 +701,7 @@ static int ircomm_tty_write(struct tty_struct *tty,
701 * we don't mess up the original "safe skb" (see tx_data_size). 701 * we don't mess up the original "safe skb" (see tx_data_size).
702 * Jean II */ 702 * Jean II */
703 if (self->max_header_size == IRCOMM_TTY_HDR_UNINITIALISED) { 703 if (self->max_header_size == IRCOMM_TTY_HDR_UNINITIALISED) {
704 IRDA_DEBUG(1, "%s() : not initialised\n", __FUNCTION__); 704 IRDA_DEBUG(1, "%s() : not initialised\n", __func__);
705#ifdef IRCOMM_NO_TX_BEFORE_INIT 705#ifdef IRCOMM_NO_TX_BEFORE_INIT
706 /* We didn't consume anything, TTY will retry */ 706 /* We didn't consume anything, TTY will retry */
707 return 0; 707 return 0;
@@ -830,7 +830,7 @@ static int ircomm_tty_write_room(struct tty_struct *tty)
830 ret = self->max_data_size; 830 ret = self->max_data_size;
831 spin_unlock_irqrestore(&self->spinlock, flags); 831 spin_unlock_irqrestore(&self->spinlock, flags);
832 } 832 }
833 IRDA_DEBUG(2, "%s(), ret=%d\n", __FUNCTION__ , ret); 833 IRDA_DEBUG(2, "%s(), ret=%d\n", __func__ , ret);
834 834
835 return ret; 835 return ret;
836} 836}
@@ -847,7 +847,7 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
847 unsigned long orig_jiffies, poll_time; 847 unsigned long orig_jiffies, poll_time;
848 unsigned long flags; 848 unsigned long flags;
849 849
850 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 850 IRDA_DEBUG(2, "%s()\n", __func__ );
851 851
852 IRDA_ASSERT(self != NULL, return;); 852 IRDA_ASSERT(self != NULL, return;);
853 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 853 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -882,7 +882,7 @@ static void ircomm_tty_throttle(struct tty_struct *tty)
882{ 882{
883 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; 883 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
884 884
885 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 885 IRDA_DEBUG(2, "%s()\n", __func__ );
886 886
887 IRDA_ASSERT(self != NULL, return;); 887 IRDA_ASSERT(self != NULL, return;);
888 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 888 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -913,7 +913,7 @@ static void ircomm_tty_unthrottle(struct tty_struct *tty)
913{ 913{
914 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; 914 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
915 915
916 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 916 IRDA_DEBUG(2, "%s()\n", __func__ );
917 917
918 IRDA_ASSERT(self != NULL, return;); 918 IRDA_ASSERT(self != NULL, return;);
919 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 919 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -928,7 +928,7 @@ static void ircomm_tty_unthrottle(struct tty_struct *tty)
928 self->settings.dte |= (IRCOMM_RTS|IRCOMM_DELTA_RTS); 928 self->settings.dte |= (IRCOMM_RTS|IRCOMM_DELTA_RTS);
929 929
930 ircomm_param_request(self, IRCOMM_DTE, TRUE); 930 ircomm_param_request(self, IRCOMM_DTE, TRUE);
931 IRDA_DEBUG(1, "%s(), FLOW_START\n", __FUNCTION__ ); 931 IRDA_DEBUG(1, "%s(), FLOW_START\n", __func__ );
932 } 932 }
933 ircomm_flow_request(self->ircomm, FLOW_START); 933 ircomm_flow_request(self->ircomm, FLOW_START);
934} 934}
@@ -965,7 +965,7 @@ static void ircomm_tty_shutdown(struct ircomm_tty_cb *self)
965 IRDA_ASSERT(self != NULL, return;); 965 IRDA_ASSERT(self != NULL, return;);
966 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 966 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
967 967
968 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 968 IRDA_DEBUG(0, "%s()\n", __func__ );
969 969
970 if (!test_and_clear_bit(ASYNC_B_INITIALIZED, &self->flags)) 970 if (!test_and_clear_bit(ASYNC_B_INITIALIZED, &self->flags))
971 return; 971 return;
@@ -1008,7 +1008,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
1008 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; 1008 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
1009 unsigned long flags; 1009 unsigned long flags;
1010 1010
1011 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 1011 IRDA_DEBUG(0, "%s()\n", __func__ );
1012 1012
1013 IRDA_ASSERT(self != NULL, return;); 1013 IRDA_ASSERT(self != NULL, return;);
1014 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 1014 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -1037,7 +1037,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
1037 */ 1037 */
1038static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch) 1038static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch)
1039{ 1039{
1040 IRDA_DEBUG(0, "%s(), not impl\n", __FUNCTION__ ); 1040 IRDA_DEBUG(0, "%s(), not impl\n", __func__ );
1041} 1041}
1042 1042
1043/* 1043/*
@@ -1081,7 +1081,7 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self)
1081 struct tty_struct *tty; 1081 struct tty_struct *tty;
1082 int status; 1082 int status;
1083 1083
1084 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 1084 IRDA_DEBUG(0, "%s()\n", __func__ );
1085 1085
1086 IRDA_ASSERT(self != NULL, return;); 1086 IRDA_ASSERT(self != NULL, return;);
1087 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 1087 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -1095,14 +1095,14 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self)
1095 } 1095 }
1096 if ((self->flags & ASYNC_CHECK_CD) && (status & IRCOMM_DELTA_CD)) { 1096 if ((self->flags & ASYNC_CHECK_CD) && (status & IRCOMM_DELTA_CD)) {
1097 IRDA_DEBUG(2, 1097 IRDA_DEBUG(2,
1098 "%s(), ircomm%d CD now %s...\n", __FUNCTION__ , self->line, 1098 "%s(), ircomm%d CD now %s...\n", __func__ , self->line,
1099 (status & IRCOMM_CD) ? "on" : "off"); 1099 (status & IRCOMM_CD) ? "on" : "off");
1100 1100
1101 if (status & IRCOMM_CD) { 1101 if (status & IRCOMM_CD) {
1102 wake_up_interruptible(&self->open_wait); 1102 wake_up_interruptible(&self->open_wait);
1103 } else { 1103 } else {
1104 IRDA_DEBUG(2, 1104 IRDA_DEBUG(2,
1105 "%s(), Doing serial hangup..\n", __FUNCTION__ ); 1105 "%s(), Doing serial hangup..\n", __func__ );
1106 if (tty) 1106 if (tty)
1107 tty_hangup(tty); 1107 tty_hangup(tty);
1108 1108
@@ -1114,7 +1114,7 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self)
1114 if (tty->hw_stopped) { 1114 if (tty->hw_stopped) {
1115 if (status & IRCOMM_CTS) { 1115 if (status & IRCOMM_CTS) {
1116 IRDA_DEBUG(2, 1116 IRDA_DEBUG(2,
1117 "%s(), CTS tx start...\n", __FUNCTION__ ); 1117 "%s(), CTS tx start...\n", __func__ );
1118 tty->hw_stopped = 0; 1118 tty->hw_stopped = 0;
1119 1119
1120 /* Wake up processes blocked on open */ 1120 /* Wake up processes blocked on open */
@@ -1126,7 +1126,7 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self)
1126 } else { 1126 } else {
1127 if (!(status & IRCOMM_CTS)) { 1127 if (!(status & IRCOMM_CTS)) {
1128 IRDA_DEBUG(2, 1128 IRDA_DEBUG(2,
1129 "%s(), CTS tx stop...\n", __FUNCTION__ ); 1129 "%s(), CTS tx stop...\n", __func__ );
1130 tty->hw_stopped = 1; 1130 tty->hw_stopped = 1;
1131 } 1131 }
1132 } 1132 }
@@ -1144,14 +1144,14 @@ static int ircomm_tty_data_indication(void *instance, void *sap,
1144{ 1144{
1145 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; 1145 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
1146 1146
1147 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 1147 IRDA_DEBUG(2, "%s()\n", __func__ );
1148 1148
1149 IRDA_ASSERT(self != NULL, return -1;); 1149 IRDA_ASSERT(self != NULL, return -1;);
1150 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); 1150 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
1151 IRDA_ASSERT(skb != NULL, return -1;); 1151 IRDA_ASSERT(skb != NULL, return -1;);
1152 1152
1153 if (!self->tty) { 1153 if (!self->tty) {
1154 IRDA_DEBUG(0, "%s(), no tty!\n", __FUNCTION__ ); 1154 IRDA_DEBUG(0, "%s(), no tty!\n", __func__ );
1155 return 0; 1155 return 0;
1156 } 1156 }
1157 1157
@@ -1162,7 +1162,7 @@ static int ircomm_tty_data_indication(void *instance, void *sap,
1162 * params, we can just as well declare the hardware for running. 1162 * params, we can just as well declare the hardware for running.
1163 */ 1163 */
1164 if (self->tty->hw_stopped && (self->flow == FLOW_START)) { 1164 if (self->tty->hw_stopped && (self->flow == FLOW_START)) {
1165 IRDA_DEBUG(0, "%s(), polling for line settings!\n", __FUNCTION__ ); 1165 IRDA_DEBUG(0, "%s(), polling for line settings!\n", __func__ );
1166 ircomm_param_request(self, IRCOMM_POLL, TRUE); 1166 ircomm_param_request(self, IRCOMM_POLL, TRUE);
1167 1167
1168 /* We can just as well declare the hardware for running */ 1168 /* We can just as well declare the hardware for running */
@@ -1194,7 +1194,7 @@ static int ircomm_tty_control_indication(void *instance, void *sap,
1194 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; 1194 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
1195 int clen; 1195 int clen;
1196 1196
1197 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 1197 IRDA_DEBUG(4, "%s()\n", __func__ );
1198 1198
1199 IRDA_ASSERT(self != NULL, return -1;); 1199 IRDA_ASSERT(self != NULL, return -1;);
1200 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); 1200 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
@@ -1230,7 +1230,7 @@ static void ircomm_tty_flow_indication(void *instance, void *sap,
1230 1230
1231 switch (cmd) { 1231 switch (cmd) {
1232 case FLOW_START: 1232 case FLOW_START:
1233 IRDA_DEBUG(2, "%s(), hw start!\n", __FUNCTION__ ); 1233 IRDA_DEBUG(2, "%s(), hw start!\n", __func__ );
1234 tty->hw_stopped = 0; 1234 tty->hw_stopped = 0;
1235 1235
1236 /* ircomm_tty_do_softint will take care of the rest */ 1236 /* ircomm_tty_do_softint will take care of the rest */
@@ -1238,7 +1238,7 @@ static void ircomm_tty_flow_indication(void *instance, void *sap,
1238 break; 1238 break;
1239 default: /* If we get here, something is very wrong, better stop */ 1239 default: /* If we get here, something is very wrong, better stop */
1240 case FLOW_STOP: 1240 case FLOW_STOP:
1241 IRDA_DEBUG(2, "%s(), hw stopped!\n", __FUNCTION__ ); 1241 IRDA_DEBUG(2, "%s(), hw stopped!\n", __func__ );
1242 tty->hw_stopped = 1; 1242 tty->hw_stopped = 1;
1243 break; 1243 break;
1244 } 1244 }
diff --git a/net/irda/ircomm/ircomm_tty_attach.c b/net/irda/ircomm/ircomm_tty_attach.c
index b5a13882c927..9032a1d1190d 100644
--- a/net/irda/ircomm/ircomm_tty_attach.c
+++ b/net/irda/ircomm/ircomm_tty_attach.c
@@ -129,14 +129,14 @@ static int (*state[])(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
129 */ 129 */
130int ircomm_tty_attach_cable(struct ircomm_tty_cb *self) 130int ircomm_tty_attach_cable(struct ircomm_tty_cb *self)
131{ 131{
132 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 132 IRDA_DEBUG(0, "%s()\n", __func__ );
133 133
134 IRDA_ASSERT(self != NULL, return -1;); 134 IRDA_ASSERT(self != NULL, return -1;);
135 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); 135 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
136 136
137 /* Check if somebody has already connected to us */ 137 /* Check if somebody has already connected to us */
138 if (ircomm_is_connected(self->ircomm)) { 138 if (ircomm_is_connected(self->ircomm)) {
139 IRDA_DEBUG(0, "%s(), already connected!\n", __FUNCTION__ ); 139 IRDA_DEBUG(0, "%s(), already connected!\n", __func__ );
140 return 0; 140 return 0;
141 } 141 }
142 142
@@ -158,7 +158,7 @@ int ircomm_tty_attach_cable(struct ircomm_tty_cb *self)
158 */ 158 */
159void ircomm_tty_detach_cable(struct ircomm_tty_cb *self) 159void ircomm_tty_detach_cable(struct ircomm_tty_cb *self)
160{ 160{
161 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 161 IRDA_DEBUG(0, "%s()\n", __func__ );
162 162
163 IRDA_ASSERT(self != NULL, return;); 163 IRDA_ASSERT(self != NULL, return;);
164 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 164 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -207,7 +207,7 @@ static void ircomm_tty_ias_register(struct ircomm_tty_cb *self)
207 __u8 oct_seq[6]; 207 __u8 oct_seq[6];
208 __u16 hints; 208 __u16 hints;
209 209
210 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 210 IRDA_DEBUG(0, "%s()\n", __func__ );
211 211
212 IRDA_ASSERT(self != NULL, return;); 212 IRDA_ASSERT(self != NULL, return;);
213 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 213 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -308,16 +308,16 @@ int ircomm_tty_send_initial_parameters(struct ircomm_tty_cb *self)
308 * Set default values, but only if the application for some reason 308 * Set default values, but only if the application for some reason
309 * haven't set them already 309 * haven't set them already
310 */ 310 */
311 IRDA_DEBUG(2, "%s(), data-rate = %d\n", __FUNCTION__ , 311 IRDA_DEBUG(2, "%s(), data-rate = %d\n", __func__ ,
312 self->settings.data_rate); 312 self->settings.data_rate);
313 if (!self->settings.data_rate) 313 if (!self->settings.data_rate)
314 self->settings.data_rate = 9600; 314 self->settings.data_rate = 9600;
315 IRDA_DEBUG(2, "%s(), data-format = %d\n", __FUNCTION__ , 315 IRDA_DEBUG(2, "%s(), data-format = %d\n", __func__ ,
316 self->settings.data_format); 316 self->settings.data_format);
317 if (!self->settings.data_format) 317 if (!self->settings.data_format)
318 self->settings.data_format = IRCOMM_WSIZE_8; /* 8N1 */ 318 self->settings.data_format = IRCOMM_WSIZE_8; /* 8N1 */
319 319
320 IRDA_DEBUG(2, "%s(), flow-control = %d\n", __FUNCTION__ , 320 IRDA_DEBUG(2, "%s(), flow-control = %d\n", __func__ ,
321 self->settings.flow_control); 321 self->settings.flow_control);
322 /*self->settings.flow_control = IRCOMM_RTS_CTS_IN|IRCOMM_RTS_CTS_OUT;*/ 322 /*self->settings.flow_control = IRCOMM_RTS_CTS_IN|IRCOMM_RTS_CTS_OUT;*/
323 323
@@ -362,7 +362,7 @@ static void ircomm_tty_discovery_indication(discinfo_t *discovery,
362 struct ircomm_tty_cb *self; 362 struct ircomm_tty_cb *self;
363 struct ircomm_tty_info info; 363 struct ircomm_tty_info info;
364 364
365 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 365 IRDA_DEBUG(2, "%s()\n", __func__ );
366 366
367 /* Important note : 367 /* Important note :
368 * We need to drop all passive discoveries. 368 * We need to drop all passive discoveries.
@@ -398,7 +398,7 @@ void ircomm_tty_disconnect_indication(void *instance, void *sap,
398{ 398{
399 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; 399 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
400 400
401 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 401 IRDA_DEBUG(2, "%s()\n", __func__ );
402 402
403 IRDA_ASSERT(self != NULL, return;); 403 IRDA_ASSERT(self != NULL, return;);
404 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 404 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -428,7 +428,7 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
428{ 428{
429 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) priv; 429 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) priv;
430 430
431 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 431 IRDA_DEBUG(2, "%s()\n", __func__ );
432 432
433 IRDA_ASSERT(self != NULL, return;); 433 IRDA_ASSERT(self != NULL, return;);
434 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 434 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -439,13 +439,13 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
439 439
440 /* Check if request succeeded */ 440 /* Check if request succeeded */
441 if (result != IAS_SUCCESS) { 441 if (result != IAS_SUCCESS) {
442 IRDA_DEBUG(4, "%s(), got NULL value!\n", __FUNCTION__ ); 442 IRDA_DEBUG(4, "%s(), got NULL value!\n", __func__ );
443 return; 443 return;
444 } 444 }
445 445
446 switch (value->type) { 446 switch (value->type) {
447 case IAS_OCT_SEQ: 447 case IAS_OCT_SEQ:
448 IRDA_DEBUG(2, "%s(), got octet sequence\n", __FUNCTION__ ); 448 IRDA_DEBUG(2, "%s(), got octet sequence\n", __func__ );
449 449
450 irda_param_extract_all(self, value->t.oct_seq, value->len, 450 irda_param_extract_all(self, value->t.oct_seq, value->len,
451 &ircomm_param_info); 451 &ircomm_param_info);
@@ -455,21 +455,21 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
455 break; 455 break;
456 case IAS_INTEGER: 456 case IAS_INTEGER:
457 /* Got LSAP selector */ 457 /* Got LSAP selector */
458 IRDA_DEBUG(2, "%s(), got lsapsel = %d\n", __FUNCTION__ , 458 IRDA_DEBUG(2, "%s(), got lsapsel = %d\n", __func__ ,
459 value->t.integer); 459 value->t.integer);
460 460
461 if (value->t.integer == -1) { 461 if (value->t.integer == -1) {
462 IRDA_DEBUG(0, "%s(), invalid value!\n", __FUNCTION__ ); 462 IRDA_DEBUG(0, "%s(), invalid value!\n", __func__ );
463 } else 463 } else
464 self->dlsap_sel = value->t.integer; 464 self->dlsap_sel = value->t.integer;
465 465
466 ircomm_tty_do_event(self, IRCOMM_TTY_GOT_LSAPSEL, NULL, NULL); 466 ircomm_tty_do_event(self, IRCOMM_TTY_GOT_LSAPSEL, NULL, NULL);
467 break; 467 break;
468 case IAS_MISSING: 468 case IAS_MISSING:
469 IRDA_DEBUG(0, "%s(), got IAS_MISSING\n", __FUNCTION__ ); 469 IRDA_DEBUG(0, "%s(), got IAS_MISSING\n", __func__ );
470 break; 470 break;
471 default: 471 default:
472 IRDA_DEBUG(0, "%s(), got unknown type!\n", __FUNCTION__ ); 472 IRDA_DEBUG(0, "%s(), got unknown type!\n", __func__ );
473 break; 473 break;
474 } 474 }
475 irias_delete_value(value); 475 irias_delete_value(value);
@@ -489,7 +489,7 @@ void ircomm_tty_connect_confirm(void *instance, void *sap,
489{ 489{
490 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; 490 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
491 491
492 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 492 IRDA_DEBUG(2, "%s()\n", __func__ );
493 493
494 IRDA_ASSERT(self != NULL, return;); 494 IRDA_ASSERT(self != NULL, return;);
495 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 495 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -520,7 +520,7 @@ void ircomm_tty_connect_indication(void *instance, void *sap,
520 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; 520 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
521 int clen; 521 int clen;
522 522
523 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 523 IRDA_DEBUG(2, "%s()\n", __func__ );
524 524
525 IRDA_ASSERT(self != NULL, return;); 525 IRDA_ASSERT(self != NULL, return;);
526 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 526 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -549,7 +549,7 @@ void ircomm_tty_connect_indication(void *instance, void *sap,
549 */ 549 */
550void ircomm_tty_link_established(struct ircomm_tty_cb *self) 550void ircomm_tty_link_established(struct ircomm_tty_cb *self)
551{ 551{
552 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 552 IRDA_DEBUG(2, "%s()\n", __func__ );
553 553
554 IRDA_ASSERT(self != NULL, return;); 554 IRDA_ASSERT(self != NULL, return;);
555 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 555 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -566,10 +566,10 @@ void ircomm_tty_link_established(struct ircomm_tty_cb *self)
566 * line. 566 * line.
567 */ 567 */
568 if ((self->flags & ASYNC_CTS_FLOW) && ((self->settings.dce & IRCOMM_CTS) == 0)) { 568 if ((self->flags & ASYNC_CTS_FLOW) && ((self->settings.dce & IRCOMM_CTS) == 0)) {
569 IRDA_DEBUG(0, "%s(), waiting for CTS ...\n", __FUNCTION__ ); 569 IRDA_DEBUG(0, "%s(), waiting for CTS ...\n", __func__ );
570 return; 570 return;
571 } else { 571 } else {
572 IRDA_DEBUG(1, "%s(), starting hardware!\n", __FUNCTION__ ); 572 IRDA_DEBUG(1, "%s(), starting hardware!\n", __func__ );
573 573
574 self->tty->hw_stopped = 0; 574 self->tty->hw_stopped = 0;
575 575
@@ -607,7 +607,7 @@ static void ircomm_tty_watchdog_timer_expired(void *data)
607{ 607{
608 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) data; 608 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) data;
609 609
610 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 610 IRDA_DEBUG(2, "%s()\n", __func__ );
611 611
612 IRDA_ASSERT(self != NULL, return;); 612 IRDA_ASSERT(self != NULL, return;);
613 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 613 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -628,7 +628,7 @@ int ircomm_tty_do_event(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
628 IRDA_ASSERT(self != NULL, return -1;); 628 IRDA_ASSERT(self != NULL, return -1;);
629 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); 629 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
630 630
631 IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__ , 631 IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ ,
632 ircomm_tty_state[self->state], ircomm_tty_event[event]); 632 ircomm_tty_state[self->state], ircomm_tty_event[event]);
633 633
634 return (*state[self->state])(self, event, skb, info); 634 return (*state[self->state])(self, event, skb, info);
@@ -646,7 +646,7 @@ static inline void ircomm_tty_next_state(struct ircomm_tty_cb *self, IRCOMM_TTY_
646 IRDA_ASSERT(self != NULL, return;); 646 IRDA_ASSERT(self != NULL, return;);
647 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 647 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
648 648
649 IRDA_DEBUG(2, "%s: next state=%s, service type=%d\n", __FUNCTION__ , 649 IRDA_DEBUG(2, "%s: next state=%s, service type=%d\n", __func__ ,
650 ircomm_tty_state[self->state], self->service_type); 650 ircomm_tty_state[self->state], self->service_type);
651 */ 651 */
652 self->state = state; 652 self->state = state;
@@ -665,7 +665,7 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
665{ 665{
666 int ret = 0; 666 int ret = 0;
667 667
668 IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__ , 668 IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ ,
669 ircomm_tty_state[self->state], ircomm_tty_event[event]); 669 ircomm_tty_state[self->state], ircomm_tty_event[event]);
670 switch (event) { 670 switch (event) {
671 case IRCOMM_TTY_ATTACH_CABLE: 671 case IRCOMM_TTY_ATTACH_CABLE:
@@ -681,7 +681,7 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
681 681
682 if (self->iriap) { 682 if (self->iriap) {
683 IRDA_WARNING("%s(), busy with a previous query\n", 683 IRDA_WARNING("%s(), busy with a previous query\n",
684 __FUNCTION__); 684 __func__);
685 return -EBUSY; 685 return -EBUSY;
686 } 686 }
687 687
@@ -709,7 +709,7 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
709 ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); 709 ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
710 break; 710 break;
711 default: 711 default:
712 IRDA_DEBUG(2, "%s(), unknown event: %s\n", __FUNCTION__ , 712 IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ ,
713 ircomm_tty_event[event]); 713 ircomm_tty_event[event]);
714 ret = -EINVAL; 714 ret = -EINVAL;
715 } 715 }
@@ -729,7 +729,7 @@ static int ircomm_tty_state_search(struct ircomm_tty_cb *self,
729{ 729{
730 int ret = 0; 730 int ret = 0;
731 731
732 IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__ , 732 IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ ,
733 ircomm_tty_state[self->state], ircomm_tty_event[event]); 733 ircomm_tty_state[self->state], ircomm_tty_event[event]);
734 734
735 switch (event) { 735 switch (event) {
@@ -739,7 +739,7 @@ static int ircomm_tty_state_search(struct ircomm_tty_cb *self,
739 739
740 if (self->iriap) { 740 if (self->iriap) {
741 IRDA_WARNING("%s(), busy with a previous query\n", 741 IRDA_WARNING("%s(), busy with a previous query\n",
742 __FUNCTION__); 742 __func__);
743 return -EBUSY; 743 return -EBUSY;
744 } 744 }
745 745
@@ -782,7 +782,7 @@ static int ircomm_tty_state_search(struct ircomm_tty_cb *self,
782 ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); 782 ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
783 break; 783 break;
784 default: 784 default:
785 IRDA_DEBUG(2, "%s(), unknown event: %s\n", __FUNCTION__ , 785 IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ ,
786 ircomm_tty_event[event]); 786 ircomm_tty_event[event]);
787 ret = -EINVAL; 787 ret = -EINVAL;
788 } 788 }
@@ -802,14 +802,14 @@ static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self,
802{ 802{
803 int ret = 0; 803 int ret = 0;
804 804
805 IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__ , 805 IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ ,
806 ircomm_tty_state[self->state], ircomm_tty_event[event]); 806 ircomm_tty_state[self->state], ircomm_tty_event[event]);
807 807
808 switch (event) { 808 switch (event) {
809 case IRCOMM_TTY_GOT_PARAMETERS: 809 case IRCOMM_TTY_GOT_PARAMETERS:
810 if (self->iriap) { 810 if (self->iriap) {
811 IRDA_WARNING("%s(), busy with a previous query\n", 811 IRDA_WARNING("%s(), busy with a previous query\n",
812 __FUNCTION__); 812 __func__);
813 return -EBUSY; 813 return -EBUSY;
814 } 814 }
815 815
@@ -840,7 +840,7 @@ static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self,
840 ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); 840 ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
841 break; 841 break;
842 default: 842 default:
843 IRDA_DEBUG(2, "%s(), unknown event: %s\n", __FUNCTION__ , 843 IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ ,
844 ircomm_tty_event[event]); 844 ircomm_tty_event[event]);
845 ret = -EINVAL; 845 ret = -EINVAL;
846 } 846 }
@@ -860,7 +860,7 @@ static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self,
860{ 860{
861 int ret = 0; 861 int ret = 0;
862 862
863 IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__ , 863 IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ ,
864 ircomm_tty_state[self->state], ircomm_tty_event[event]); 864 ircomm_tty_state[self->state], ircomm_tty_event[event]);
865 865
866 switch (event) { 866 switch (event) {
@@ -889,7 +889,7 @@ static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self,
889 ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); 889 ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
890 break; 890 break;
891 default: 891 default:
892 IRDA_DEBUG(2, "%s(), unknown event: %s\n", __FUNCTION__ , 892 IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ ,
893 ircomm_tty_event[event]); 893 ircomm_tty_event[event]);
894 ret = -EINVAL; 894 ret = -EINVAL;
895 } 895 }
@@ -909,7 +909,7 @@ static int ircomm_tty_state_setup(struct ircomm_tty_cb *self,
909{ 909{
910 int ret = 0; 910 int ret = 0;
911 911
912 IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__ , 912 IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ ,
913 ircomm_tty_state[self->state], ircomm_tty_event[event]); 913 ircomm_tty_state[self->state], ircomm_tty_event[event]);
914 914
915 switch (event) { 915 switch (event) {
@@ -943,7 +943,7 @@ static int ircomm_tty_state_setup(struct ircomm_tty_cb *self,
943 ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); 943 ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
944 break; 944 break;
945 default: 945 default:
946 IRDA_DEBUG(2, "%s(), unknown event: %s\n", __FUNCTION__ , 946 IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ ,
947 ircomm_tty_event[event]); 947 ircomm_tty_event[event]);
948 ret = -EINVAL; 948 ret = -EINVAL;
949 } 949 }
@@ -981,13 +981,13 @@ static int ircomm_tty_state_ready(struct ircomm_tty_cb *self,
981 self->settings.dce = IRCOMM_DELTA_CD; 981 self->settings.dce = IRCOMM_DELTA_CD;
982 ircomm_tty_check_modem_status(self); 982 ircomm_tty_check_modem_status(self);
983 } else { 983 } else {
984 IRDA_DEBUG(0, "%s(), hanging up!\n", __FUNCTION__ ); 984 IRDA_DEBUG(0, "%s(), hanging up!\n", __func__ );
985 if (self->tty) 985 if (self->tty)
986 tty_hangup(self->tty); 986 tty_hangup(self->tty);
987 } 987 }
988 break; 988 break;
989 default: 989 default:
990 IRDA_DEBUG(2, "%s(), unknown event: %s\n", __FUNCTION__ , 990 IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ ,
991 ircomm_tty_event[event]); 991 ircomm_tty_event[event]);
992 ret = -EINVAL; 992 ret = -EINVAL;
993 } 993 }
diff --git a/net/irda/ircomm/ircomm_tty_ioctl.c b/net/irda/ircomm/ircomm_tty_ioctl.c
index 6030947b6d93..24cb3aa2bbfb 100644
--- a/net/irda/ircomm/ircomm_tty_ioctl.c
+++ b/net/irda/ircomm/ircomm_tty_ioctl.c
@@ -57,7 +57,7 @@ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self)
57 unsigned cflag, cval; 57 unsigned cflag, cval;
58 int baud; 58 int baud;
59 59
60 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 60 IRDA_DEBUG(2, "%s()\n", __func__ );
61 61
62 if (!self->tty || !self->tty->termios || !self->ircomm) 62 if (!self->tty || !self->tty->termios || !self->ircomm)
63 return; 63 return;
@@ -94,7 +94,7 @@ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self)
94 self->settings.flow_control |= IRCOMM_RTS_CTS_IN; 94 self->settings.flow_control |= IRCOMM_RTS_CTS_IN;
95 /* This got me. Bummer. Jean II */ 95 /* This got me. Bummer. Jean II */
96 if (self->service_type == IRCOMM_3_WIRE_RAW) 96 if (self->service_type == IRCOMM_3_WIRE_RAW)
97 IRDA_WARNING("%s(), enabling RTS/CTS on link that doesn't support it (3-wire-raw)\n", __FUNCTION__); 97 IRDA_WARNING("%s(), enabling RTS/CTS on link that doesn't support it (3-wire-raw)\n", __func__);
98 } else { 98 } else {
99 self->flags &= ~ASYNC_CTS_FLOW; 99 self->flags &= ~ASYNC_CTS_FLOW;
100 self->settings.flow_control &= ~IRCOMM_RTS_CTS_IN; 100 self->settings.flow_control &= ~IRCOMM_RTS_CTS_IN;
@@ -150,7 +150,7 @@ void ircomm_tty_set_termios(struct tty_struct *tty,
150 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; 150 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
151 unsigned int cflag = tty->termios->c_cflag; 151 unsigned int cflag = tty->termios->c_cflag;
152 152
153 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 153 IRDA_DEBUG(2, "%s()\n", __func__ );
154 154
155 if ((cflag == old_termios->c_cflag) && 155 if ((cflag == old_termios->c_cflag) &&
156 (RELEVANT_IFLAG(tty->termios->c_iflag) == 156 (RELEVANT_IFLAG(tty->termios->c_iflag) ==
@@ -199,7 +199,7 @@ int ircomm_tty_tiocmget(struct tty_struct *tty, struct file *file)
199 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; 199 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
200 unsigned int result; 200 unsigned int result;
201 201
202 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 202 IRDA_DEBUG(2, "%s()\n", __func__ );
203 203
204 if (tty->flags & (1 << TTY_IO_ERROR)) 204 if (tty->flags & (1 << TTY_IO_ERROR))
205 return -EIO; 205 return -EIO;
@@ -224,7 +224,7 @@ int ircomm_tty_tiocmset(struct tty_struct *tty, struct file *file,
224{ 224{
225 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; 225 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
226 226
227 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 227 IRDA_DEBUG(2, "%s()\n", __func__ );
228 228
229 if (tty->flags & (1 << TTY_IO_ERROR)) 229 if (tty->flags & (1 << TTY_IO_ERROR))
230 return -EIO; 230 return -EIO;
@@ -266,7 +266,7 @@ static int ircomm_tty_get_serial_info(struct ircomm_tty_cb *self,
266 if (!retinfo) 266 if (!retinfo)
267 return -EFAULT; 267 return -EFAULT;
268 268
269 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 269 IRDA_DEBUG(2, "%s()\n", __func__ );
270 270
271 memset(&info, 0, sizeof(info)); 271 memset(&info, 0, sizeof(info));
272 info.line = self->line; 272 info.line = self->line;
@@ -302,7 +302,7 @@ static int ircomm_tty_set_serial_info(struct ircomm_tty_cb *self,
302 struct serial_struct new_serial; 302 struct serial_struct new_serial;
303 struct ircomm_tty_cb old_state, *state; 303 struct ircomm_tty_cb old_state, *state;
304 304
305 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 305 IRDA_DEBUG(0, "%s()\n", __func__ );
306 306
307 if (copy_from_user(&new_serial,new_info,sizeof(new_serial))) 307 if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
308 return -EFAULT; 308 return -EFAULT;
@@ -376,7 +376,7 @@ int ircomm_tty_ioctl(struct tty_struct *tty, struct file *file,
376 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; 376 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
377 int ret = 0; 377 int ret = 0;
378 378
379 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 379 IRDA_DEBUG(2, "%s()\n", __func__ );
380 380
381 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && 381 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
382 (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) && 382 (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) &&
@@ -397,7 +397,7 @@ int ircomm_tty_ioctl(struct tty_struct *tty, struct file *file,
397 break; 397 break;
398 398
399 case TIOCGICOUNT: 399 case TIOCGICOUNT:
400 IRDA_DEBUG(0, "%s(), TIOCGICOUNT not impl!\n", __FUNCTION__ ); 400 IRDA_DEBUG(0, "%s(), TIOCGICOUNT not impl!\n", __func__ );
401#if 0 401#if 0
402 save_flags(flags); cli(); 402 save_flags(flags); cli();
403 cnow = driver->icount; 403 cnow = driver->icount;
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index 87185910d0ee..ea319e3ddc18 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -90,7 +90,7 @@ static void leftover_dongle(void *arg)
90 90
91void irda_device_cleanup(void) 91void irda_device_cleanup(void)
92{ 92{
93 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 93 IRDA_DEBUG(4, "%s()\n", __func__);
94 94
95 hashbin_delete(tasks, (FREE_FUNC) __irda_task_delete); 95 hashbin_delete(tasks, (FREE_FUNC) __irda_task_delete);
96 96
@@ -107,7 +107,7 @@ void irda_device_set_media_busy(struct net_device *dev, int status)
107{ 107{
108 struct irlap_cb *self; 108 struct irlap_cb *self;
109 109
110 IRDA_DEBUG(4, "%s(%s)\n", __FUNCTION__, status ? "TRUE" : "FALSE"); 110 IRDA_DEBUG(4, "%s(%s)\n", __func__, status ? "TRUE" : "FALSE");
111 111
112 self = (struct irlap_cb *) dev->atalk_ptr; 112 self = (struct irlap_cb *) dev->atalk_ptr;
113 113
@@ -147,11 +147,11 @@ int irda_device_is_receiving(struct net_device *dev)
147 struct if_irda_req req; 147 struct if_irda_req req;
148 int ret; 148 int ret;
149 149
150 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 150 IRDA_DEBUG(2, "%s()\n", __func__);
151 151
152 if (!dev->do_ioctl) { 152 if (!dev->do_ioctl) {
153 IRDA_ERROR("%s: do_ioctl not impl. by device driver\n", 153 IRDA_ERROR("%s: do_ioctl not impl. by device driver\n",
154 __FUNCTION__); 154 __func__);
155 return -1; 155 return -1;
156 } 156 }
157 157
@@ -191,7 +191,7 @@ static int irda_task_kick(struct irda_task *task)
191 int count = 0; 191 int count = 0;
192 int timeout; 192 int timeout;
193 193
194 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 194 IRDA_DEBUG(2, "%s()\n", __func__);
195 195
196 IRDA_ASSERT(task != NULL, return -1;); 196 IRDA_ASSERT(task != NULL, return -1;);
197 IRDA_ASSERT(task->magic == IRDA_TASK_MAGIC, return -1;); 197 IRDA_ASSERT(task->magic == IRDA_TASK_MAGIC, return -1;);
@@ -201,14 +201,14 @@ static int irda_task_kick(struct irda_task *task)
201 timeout = task->function(task); 201 timeout = task->function(task);
202 if (count++ > 100) { 202 if (count++ > 100) {
203 IRDA_ERROR("%s: error in task handler!\n", 203 IRDA_ERROR("%s: error in task handler!\n",
204 __FUNCTION__); 204 __func__);
205 irda_task_delete(task); 205 irda_task_delete(task);
206 return TRUE; 206 return TRUE;
207 } 207 }
208 } while ((timeout == 0) && (task->state != IRDA_TASK_DONE)); 208 } while ((timeout == 0) && (task->state != IRDA_TASK_DONE));
209 209
210 if (timeout < 0) { 210 if (timeout < 0) {
211 IRDA_ERROR("%s: Error executing task!\n", __FUNCTION__); 211 IRDA_ERROR("%s: Error executing task!\n", __func__);
212 irda_task_delete(task); 212 irda_task_delete(task);
213 return TRUE; 213 return TRUE;
214 } 214 }
@@ -241,7 +241,7 @@ static int irda_task_kick(struct irda_task *task)
241 finished = FALSE; 241 finished = FALSE;
242 } else { 242 } else {
243 IRDA_DEBUG(0, "%s(), not finished, and no timeout!\n", 243 IRDA_DEBUG(0, "%s(), not finished, and no timeout!\n",
244 __FUNCTION__); 244 __func__);
245 finished = FALSE; 245 finished = FALSE;
246 } 246 }
247 247
@@ -258,7 +258,7 @@ static void irda_task_timer_expired(void *data)
258{ 258{
259 struct irda_task *task; 259 struct irda_task *task;
260 260
261 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 261 IRDA_DEBUG(2, "%s()\n", __func__);
262 262
263 task = (struct irda_task *) data; 263 task = (struct irda_task *) data;
264 264
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 390a790886eb..9e15c82960fe 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -108,7 +108,7 @@ int __init iriap_init(void)
108 irias_objects = hashbin_new(HB_LOCK); 108 irias_objects = hashbin_new(HB_LOCK);
109 if (!irias_objects) { 109 if (!irias_objects) {
110 IRDA_WARNING("%s: Can't allocate irias_objects hashbin!\n", 110 IRDA_WARNING("%s: Can't allocate irias_objects hashbin!\n",
111 __FUNCTION__); 111 __func__);
112 hashbin_delete(iriap, NULL); 112 hashbin_delete(iriap, NULL);
113 return -ENOMEM; 113 return -ENOMEM;
114 } 114 }
@@ -139,7 +139,7 @@ int __init iriap_init(void)
139 */ 139 */
140 server = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL); 140 server = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL);
141 if (!server) { 141 if (!server) {
142 IRDA_DEBUG(0, "%s(), unable to open server\n", __FUNCTION__); 142 IRDA_DEBUG(0, "%s(), unable to open server\n", __func__);
143 return -1; 143 return -1;
144 } 144 }
145 iriap_register_lsap(server, LSAP_IAS, IAS_SERVER); 145 iriap_register_lsap(server, LSAP_IAS, IAS_SERVER);
@@ -171,11 +171,11 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
171{ 171{
172 struct iriap_cb *self; 172 struct iriap_cb *self;
173 173
174 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 174 IRDA_DEBUG(2, "%s()\n", __func__);
175 175
176 self = kzalloc(sizeof(*self), GFP_ATOMIC); 176 self = kzalloc(sizeof(*self), GFP_ATOMIC);
177 if (!self) { 177 if (!self) {
178 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 178 IRDA_WARNING("%s: Unable to kmalloc!\n", __func__);
179 return NULL; 179 return NULL;
180 } 180 }
181 181
@@ -217,7 +217,7 @@ EXPORT_SYMBOL(iriap_open);
217 */ 217 */
218static void __iriap_close(struct iriap_cb *self) 218static void __iriap_close(struct iriap_cb *self)
219{ 219{
220 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 220 IRDA_DEBUG(4, "%s()\n", __func__);
221 221
222 IRDA_ASSERT(self != NULL, return;); 222 IRDA_ASSERT(self != NULL, return;);
223 IRDA_ASSERT(self->magic == IAS_MAGIC, return;); 223 IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
@@ -241,7 +241,7 @@ void iriap_close(struct iriap_cb *self)
241{ 241{
242 struct iriap_cb *entry; 242 struct iriap_cb *entry;
243 243
244 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 244 IRDA_DEBUG(2, "%s()\n", __func__);
245 245
246 IRDA_ASSERT(self != NULL, return;); 246 IRDA_ASSERT(self != NULL, return;);
247 IRDA_ASSERT(self->magic == IAS_MAGIC, return;); 247 IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
@@ -262,7 +262,7 @@ static int iriap_register_lsap(struct iriap_cb *self, __u8 slsap_sel, int mode)
262{ 262{
263 notify_t notify; 263 notify_t notify;
264 264
265 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 265 IRDA_DEBUG(2, "%s()\n", __func__);
266 266
267 irda_notify_init(&notify); 267 irda_notify_init(&notify);
268 notify.connect_confirm = iriap_connect_confirm; 268 notify.connect_confirm = iriap_connect_confirm;
@@ -277,7 +277,7 @@ static int iriap_register_lsap(struct iriap_cb *self, __u8 slsap_sel, int mode)
277 277
278 self->lsap = irlmp_open_lsap(slsap_sel, &notify, 0); 278 self->lsap = irlmp_open_lsap(slsap_sel, &notify, 0);
279 if (self->lsap == NULL) { 279 if (self->lsap == NULL) {
280 IRDA_ERROR("%s: Unable to allocated LSAP!\n", __FUNCTION__); 280 IRDA_ERROR("%s: Unable to allocated LSAP!\n", __func__);
281 return -1; 281 return -1;
282 } 282 }
283 self->slsap_sel = self->lsap->slsap_sel; 283 self->slsap_sel = self->lsap->slsap_sel;
@@ -297,7 +297,7 @@ static void iriap_disconnect_indication(void *instance, void *sap,
297{ 297{
298 struct iriap_cb *self; 298 struct iriap_cb *self;
299 299
300 IRDA_DEBUG(4, "%s(), reason=%s\n", __FUNCTION__, irlmp_reasons[reason]); 300 IRDA_DEBUG(4, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]);
301 301
302 self = (struct iriap_cb *) instance; 302 self = (struct iriap_cb *) instance;
303 303
@@ -313,7 +313,7 @@ static void iriap_disconnect_indication(void *instance, void *sap,
313 dev_kfree_skb(skb); 313 dev_kfree_skb(skb);
314 314
315 if (self->mode == IAS_CLIENT) { 315 if (self->mode == IAS_CLIENT) {
316 IRDA_DEBUG(4, "%s(), disconnect as client\n", __FUNCTION__); 316 IRDA_DEBUG(4, "%s(), disconnect as client\n", __func__);
317 317
318 318
319 iriap_do_client_event(self, IAP_LM_DISCONNECT_INDICATION, 319 iriap_do_client_event(self, IAP_LM_DISCONNECT_INDICATION,
@@ -326,7 +326,7 @@ static void iriap_disconnect_indication(void *instance, void *sap,
326 if (self->confirm) 326 if (self->confirm)
327 self->confirm(IAS_DISCONNECT, 0, NULL, self->priv); 327 self->confirm(IAS_DISCONNECT, 0, NULL, self->priv);
328 } else { 328 } else {
329 IRDA_DEBUG(4, "%s(), disconnect as server\n", __FUNCTION__); 329 IRDA_DEBUG(4, "%s(), disconnect as server\n", __func__);
330 iriap_do_server_event(self, IAP_LM_DISCONNECT_INDICATION, 330 iriap_do_server_event(self, IAP_LM_DISCONNECT_INDICATION,
331 NULL); 331 NULL);
332 iriap_close(self); 332 iriap_close(self);
@@ -340,7 +340,7 @@ static void iriap_disconnect_request(struct iriap_cb *self)
340{ 340{
341 struct sk_buff *tx_skb; 341 struct sk_buff *tx_skb;
342 342
343 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 343 IRDA_DEBUG(4, "%s()\n", __func__);
344 344
345 IRDA_ASSERT(self != NULL, return;); 345 IRDA_ASSERT(self != NULL, return;);
346 IRDA_ASSERT(self->magic == IAS_MAGIC, return;); 346 IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
@@ -349,7 +349,7 @@ static void iriap_disconnect_request(struct iriap_cb *self)
349 if (tx_skb == NULL) { 349 if (tx_skb == NULL) {
350 IRDA_DEBUG(0, 350 IRDA_DEBUG(0,
351 "%s(), Could not allocate an sk_buff of length %d\n", 351 "%s(), Could not allocate an sk_buff of length %d\n",
352 __FUNCTION__, LMP_MAX_HEADER); 352 __func__, LMP_MAX_HEADER);
353 return; 353 return;
354 } 354 }
355 355
@@ -453,13 +453,13 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
453 /* Get length, MSB first */ 453 /* Get length, MSB first */
454 len = be16_to_cpu(get_unaligned((__be16 *)(fp+n))); n += 2; 454 len = be16_to_cpu(get_unaligned((__be16 *)(fp+n))); n += 2;
455 455
456 IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__, len); 456 IRDA_DEBUG(4, "%s(), len=%d\n", __func__, len);
457 457
458 /* Get object ID, MSB first */ 458 /* Get object ID, MSB first */
459 obj_id = be16_to_cpu(get_unaligned((__be16 *)(fp+n))); n += 2; 459 obj_id = be16_to_cpu(get_unaligned((__be16 *)(fp+n))); n += 2;
460 460
461 type = fp[n++]; 461 type = fp[n++];
462 IRDA_DEBUG(4, "%s(), Value type = %d\n", __FUNCTION__, type); 462 IRDA_DEBUG(4, "%s(), Value type = %d\n", __func__, type);
463 463
464 switch (type) { 464 switch (type) {
465 case IAS_INTEGER: 465 case IAS_INTEGER:
@@ -468,7 +468,7 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
468 value = irias_new_integer_value(tmp_cpu32); 468 value = irias_new_integer_value(tmp_cpu32);
469 469
470 /* Legal values restricted to 0x01-0x6f, page 15 irttp */ 470 /* Legal values restricted to 0x01-0x6f, page 15 irttp */
471 IRDA_DEBUG(4, "%s(), lsap=%d\n", __FUNCTION__, value->t.integer); 471 IRDA_DEBUG(4, "%s(), lsap=%d\n", __func__, value->t.integer);
472 break; 472 break;
473 case IAS_STRING: 473 case IAS_STRING:
474 charset = fp[n++]; 474 charset = fp[n++];
@@ -488,7 +488,7 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
488/* case CS_UNICODE: */ 488/* case CS_UNICODE: */
489 default: 489 default:
490 IRDA_DEBUG(0, "%s(), charset %s, not supported\n", 490 IRDA_DEBUG(0, "%s(), charset %s, not supported\n",
491 __FUNCTION__, ias_charset_types[charset]); 491 __func__, ias_charset_types[charset]);
492 492
493 /* Aborting, close connection! */ 493 /* Aborting, close connection! */
494 iriap_disconnect_request(self); 494 iriap_disconnect_request(self);
@@ -496,7 +496,7 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
496 /* break; */ 496 /* break; */
497 } 497 }
498 value_len = fp[n++]; 498 value_len = fp[n++];
499 IRDA_DEBUG(4, "%s(), strlen=%d\n", __FUNCTION__, value_len); 499 IRDA_DEBUG(4, "%s(), strlen=%d\n", __func__, value_len);
500 500
501 /* Make sure the string is null-terminated */ 501 /* Make sure the string is null-terminated */
502 fp[n+value_len] = 0x00; 502 fp[n+value_len] = 0x00;
@@ -526,7 +526,7 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
526 if (self->confirm) 526 if (self->confirm)
527 self->confirm(IAS_SUCCESS, obj_id, value, self->priv); 527 self->confirm(IAS_SUCCESS, obj_id, value, self->priv);
528 else { 528 else {
529 IRDA_DEBUG(0, "%s(), missing handler!\n", __FUNCTION__); 529 IRDA_DEBUG(0, "%s(), missing handler!\n", __func__);
530 irias_delete_value(value); 530 irias_delete_value(value);
531 } 531 }
532} 532}
@@ -548,7 +548,7 @@ static void iriap_getvaluebyclass_response(struct iriap_cb *self,
548 __be16 tmp_be16; 548 __be16 tmp_be16;
549 __u8 *fp; 549 __u8 *fp;
550 550
551 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 551 IRDA_DEBUG(4, "%s()\n", __func__);
552 552
553 IRDA_ASSERT(self != NULL, return;); 553 IRDA_ASSERT(self != NULL, return;);
554 IRDA_ASSERT(self->magic == IAS_MAGIC, return;); 554 IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
@@ -610,12 +610,12 @@ static void iriap_getvaluebyclass_response(struct iriap_cb *self,
610 memcpy(fp+n, value->t.oct_seq, value->len); n+=value->len; 610 memcpy(fp+n, value->t.oct_seq, value->len); n+=value->len;
611 break; 611 break;
612 case IAS_MISSING: 612 case IAS_MISSING:
613 IRDA_DEBUG( 3, "%s: sending IAS_MISSING\n", __FUNCTION__); 613 IRDA_DEBUG( 3, "%s: sending IAS_MISSING\n", __func__);
614 skb_put(tx_skb, 1); 614 skb_put(tx_skb, 1);
615 fp[n++] = value->type; 615 fp[n++] = value->type;
616 break; 616 break;
617 default: 617 default:
618 IRDA_DEBUG(0, "%s(), type not implemented!\n", __FUNCTION__); 618 IRDA_DEBUG(0, "%s(), type not implemented!\n", __func__);
619 break; 619 break;
620 } 620 }
621 iriap_do_r_connect_event(self, IAP_CALL_RESPONSE, tx_skb); 621 iriap_do_r_connect_event(self, IAP_CALL_RESPONSE, tx_skb);
@@ -642,7 +642,7 @@ static void iriap_getvaluebyclass_indication(struct iriap_cb *self,
642 __u8 *fp; 642 __u8 *fp;
643 int n; 643 int n;
644 644
645 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 645 IRDA_DEBUG(4, "%s()\n", __func__);
646 646
647 IRDA_ASSERT(self != NULL, return;); 647 IRDA_ASSERT(self != NULL, return;);
648 IRDA_ASSERT(self->magic == IAS_MAGIC, return;); 648 IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
@@ -697,7 +697,7 @@ void iriap_send_ack(struct iriap_cb *self)
697 struct sk_buff *tx_skb; 697 struct sk_buff *tx_skb;
698 __u8 *frame; 698 __u8 *frame;
699 699
700 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 700 IRDA_DEBUG(2, "%s()\n", __func__);
701 701
702 IRDA_ASSERT(self != NULL, return;); 702 IRDA_ASSERT(self != NULL, return;);
703 IRDA_ASSERT(self->magic == IAS_MAGIC, return;); 703 IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
@@ -728,7 +728,7 @@ void iriap_connect_request(struct iriap_cb *self)
728 self->saddr, self->daddr, 728 self->saddr, self->daddr,
729 NULL, NULL); 729 NULL, NULL);
730 if (ret < 0) { 730 if (ret < 0) {
731 IRDA_DEBUG(0, "%s(), connect failed!\n", __FUNCTION__); 731 IRDA_DEBUG(0, "%s(), connect failed!\n", __func__);
732 self->confirm(IAS_DISCONNECT, 0, NULL, self->priv); 732 self->confirm(IAS_DISCONNECT, 0, NULL, self->priv);
733 } 733 }
734} 734}
@@ -776,7 +776,7 @@ static void iriap_connect_indication(void *instance, void *sap,
776{ 776{
777 struct iriap_cb *self, *new; 777 struct iriap_cb *self, *new;
778 778
779 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 779 IRDA_DEBUG(1, "%s()\n", __func__);
780 780
781 self = (struct iriap_cb *) instance; 781 self = (struct iriap_cb *) instance;
782 782
@@ -787,14 +787,14 @@ static void iriap_connect_indication(void *instance, void *sap,
787 /* Start new server */ 787 /* Start new server */
788 new = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL); 788 new = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL);
789 if (!new) { 789 if (!new) {
790 IRDA_DEBUG(0, "%s(), open failed\n", __FUNCTION__); 790 IRDA_DEBUG(0, "%s(), open failed\n", __func__);
791 goto out; 791 goto out;
792 } 792 }
793 793
794 /* Now attach up the new "socket" */ 794 /* Now attach up the new "socket" */
795 new->lsap = irlmp_dup(self->lsap, new); 795 new->lsap = irlmp_dup(self->lsap, new);
796 if (!new->lsap) { 796 if (!new->lsap) {
797 IRDA_DEBUG(0, "%s(), dup failed!\n", __FUNCTION__); 797 IRDA_DEBUG(0, "%s(), dup failed!\n", __func__);
798 goto out; 798 goto out;
799 } 799 }
800 800
@@ -824,7 +824,7 @@ static int iriap_data_indication(void *instance, void *sap,
824 __u8 *frame; 824 __u8 *frame;
825 __u8 opcode; 825 __u8 opcode;
826 826
827 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 827 IRDA_DEBUG(3, "%s()\n", __func__);
828 828
829 self = (struct iriap_cb *) instance; 829 self = (struct iriap_cb *) instance;
830 830
@@ -836,7 +836,7 @@ static int iriap_data_indication(void *instance, void *sap,
836 836
837 if (self->mode == IAS_SERVER) { 837 if (self->mode == IAS_SERVER) {
838 /* Call server */ 838 /* Call server */
839 IRDA_DEBUG(4, "%s(), Calling server!\n", __FUNCTION__); 839 IRDA_DEBUG(4, "%s(), Calling server!\n", __func__);
840 iriap_do_r_connect_event(self, IAP_RECV_F_LST, skb); 840 iriap_do_r_connect_event(self, IAP_RECV_F_LST, skb);
841 goto out; 841 goto out;
842 } 842 }
@@ -844,13 +844,13 @@ static int iriap_data_indication(void *instance, void *sap,
844 if (~opcode & IAP_LST) { 844 if (~opcode & IAP_LST) {
845 IRDA_WARNING("%s:, IrIAS multiframe commands or " 845 IRDA_WARNING("%s:, IrIAS multiframe commands or "
846 "results is not implemented yet!\n", 846 "results is not implemented yet!\n",
847 __FUNCTION__); 847 __func__);
848 goto out; 848 goto out;
849 } 849 }
850 850
851 /* Check for ack frames since they don't contain any data */ 851 /* Check for ack frames since they don't contain any data */
852 if (opcode & IAP_ACK) { 852 if (opcode & IAP_ACK) {
853 IRDA_DEBUG(0, "%s() Got ack frame!\n", __FUNCTION__); 853 IRDA_DEBUG(0, "%s() Got ack frame!\n", __func__);
854 goto out; 854 goto out;
855 } 855 }
856 856
@@ -868,7 +868,7 @@ static int iriap_data_indication(void *instance, void *sap,
868 iriap_getvaluebyclass_confirm(self, skb); 868 iriap_getvaluebyclass_confirm(self, skb);
869 break; 869 break;
870 case IAS_CLASS_UNKNOWN: 870 case IAS_CLASS_UNKNOWN:
871 IRDA_DEBUG(1, "%s(), No such class!\n", __FUNCTION__); 871 IRDA_DEBUG(1, "%s(), No such class!\n", __func__);
872 /* Finished, close connection! */ 872 /* Finished, close connection! */
873 iriap_disconnect_request(self); 873 iriap_disconnect_request(self);
874 874
@@ -881,7 +881,7 @@ static int iriap_data_indication(void *instance, void *sap,
881 self->priv); 881 self->priv);
882 break; 882 break;
883 case IAS_ATTRIB_UNKNOWN: 883 case IAS_ATTRIB_UNKNOWN:
884 IRDA_DEBUG(1, "%s(), No such attribute!\n", __FUNCTION__); 884 IRDA_DEBUG(1, "%s(), No such attribute!\n", __func__);
885 /* Finished, close connection! */ 885 /* Finished, close connection! */
886 iriap_disconnect_request(self); 886 iriap_disconnect_request(self);
887 887
@@ -896,7 +896,7 @@ static int iriap_data_indication(void *instance, void *sap,
896 } 896 }
897 break; 897 break;
898 default: 898 default:
899 IRDA_DEBUG(0, "%s(), Unknown op-code: %02x\n", __FUNCTION__, 899 IRDA_DEBUG(0, "%s(), Unknown op-code: %02x\n", __func__,
900 opcode); 900 opcode);
901 break; 901 break;
902 } 902 }
@@ -918,7 +918,7 @@ void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb)
918 __u8 *fp; 918 __u8 *fp;
919 __u8 opcode; 919 __u8 opcode;
920 920
921 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 921 IRDA_DEBUG(4, "%s()\n", __func__);
922 922
923 IRDA_ASSERT(self != NULL, return;); 923 IRDA_ASSERT(self != NULL, return;);
924 IRDA_ASSERT(self->magic == IAS_MAGIC, return;); 924 IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
@@ -929,7 +929,7 @@ void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb)
929 opcode = fp[0]; 929 opcode = fp[0];
930 if (~opcode & 0x80) { 930 if (~opcode & 0x80) {
931 IRDA_WARNING("%s: IrIAS multiframe commands or results " 931 IRDA_WARNING("%s: IrIAS multiframe commands or results "
932 "is not implemented yet!\n", __FUNCTION__); 932 "is not implemented yet!\n", __func__);
933 return; 933 return;
934 } 934 }
935 opcode &= 0x7f; /* Mask away LST bit */ 935 opcode &= 0x7f; /* Mask away LST bit */
@@ -937,7 +937,7 @@ void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb)
937 switch (opcode) { 937 switch (opcode) {
938 case GET_INFO_BASE: 938 case GET_INFO_BASE:
939 IRDA_WARNING("%s: GetInfoBaseDetails not implemented yet!\n", 939 IRDA_WARNING("%s: GetInfoBaseDetails not implemented yet!\n",
940 __FUNCTION__); 940 __func__);
941 break; 941 break;
942 case GET_VALUE_BY_CLASS: 942 case GET_VALUE_BY_CLASS:
943 iriap_getvaluebyclass_indication(self, skb); 943 iriap_getvaluebyclass_indication(self, skb);
diff --git a/net/irda/iriap_event.c b/net/irda/iriap_event.c
index 8fb9d7277ca8..a301cbd93785 100644
--- a/net/irda/iriap_event.c
+++ b/net/irda/iriap_event.c
@@ -185,7 +185,7 @@ static void state_s_disconnect(struct iriap_cb *self, IRIAP_EVENT event,
185 case IAP_LM_DISCONNECT_INDICATION: 185 case IAP_LM_DISCONNECT_INDICATION:
186 break; 186 break;
187 default: 187 default:
188 IRDA_DEBUG(0, "%s(), Unknown event %d\n", __FUNCTION__, event); 188 IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event);
189 break; 189 break;
190 } 190 }
191} 191}
@@ -217,7 +217,7 @@ static void state_s_connecting(struct iriap_cb *self, IRIAP_EVENT event,
217 iriap_next_client_state(self, S_DISCONNECT); 217 iriap_next_client_state(self, S_DISCONNECT);
218 break; 218 break;
219 default: 219 default:
220 IRDA_DEBUG(0, "%s(), Unknown event %d\n", __FUNCTION__, event); 220 IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event);
221 break; 221 break;
222 } 222 }
223} 223}
@@ -269,7 +269,7 @@ static void state_s_make_call(struct iriap_cb *self, IRIAP_EVENT event,
269 iriap_next_call_state(self, S_OUTSTANDING); 269 iriap_next_call_state(self, S_OUTSTANDING);
270 break; 270 break;
271 default: 271 default:
272 IRDA_DEBUG(0, "%s(), Unknown event %d\n", __FUNCTION__, event); 272 IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event);
273 break; 273 break;
274 } 274 }
275} 275}
@@ -283,7 +283,7 @@ static void state_s_make_call(struct iriap_cb *self, IRIAP_EVENT event,
283static void state_s_calling(struct iriap_cb *self, IRIAP_EVENT event, 283static void state_s_calling(struct iriap_cb *self, IRIAP_EVENT event,
284 struct sk_buff *skb) 284 struct sk_buff *skb)
285{ 285{
286 IRDA_DEBUG(0, "%s(), Not implemented\n", __FUNCTION__); 286 IRDA_DEBUG(0, "%s(), Not implemented\n", __func__);
287} 287}
288 288
289/* 289/*
@@ -305,7 +305,7 @@ static void state_s_outstanding(struct iriap_cb *self, IRIAP_EVENT event,
305 iriap_next_call_state(self, S_WAIT_FOR_CALL); 305 iriap_next_call_state(self, S_WAIT_FOR_CALL);
306 break; 306 break;
307 default: 307 default:
308 IRDA_DEBUG(0, "%s(), Unknown event %d\n", __FUNCTION__, event); 308 IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event);
309 break; 309 break;
310 } 310 }
311} 311}
@@ -318,7 +318,7 @@ static void state_s_outstanding(struct iriap_cb *self, IRIAP_EVENT event,
318static void state_s_replying(struct iriap_cb *self, IRIAP_EVENT event, 318static void state_s_replying(struct iriap_cb *self, IRIAP_EVENT event,
319 struct sk_buff *skb) 319 struct sk_buff *skb)
320{ 320{
321 IRDA_DEBUG(0, "%s(), Not implemented\n", __FUNCTION__); 321 IRDA_DEBUG(0, "%s(), Not implemented\n", __func__);
322} 322}
323 323
324/* 324/*
@@ -330,7 +330,7 @@ static void state_s_replying(struct iriap_cb *self, IRIAP_EVENT event,
330static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event, 330static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event,
331 struct sk_buff *skb) 331 struct sk_buff *skb)
332{ 332{
333 IRDA_DEBUG(0, "%s(), Not implemented\n", __FUNCTION__); 333 IRDA_DEBUG(0, "%s(), Not implemented\n", __func__);
334} 334}
335 335
336 336
@@ -343,7 +343,7 @@ static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event,
343static void state_s_wait_active(struct iriap_cb *self, IRIAP_EVENT event, 343static void state_s_wait_active(struct iriap_cb *self, IRIAP_EVENT event,
344 struct sk_buff *skb) 344 struct sk_buff *skb)
345{ 345{
346 IRDA_DEBUG(0, "%s(), Not implemented\n", __FUNCTION__); 346 IRDA_DEBUG(0, "%s(), Not implemented\n", __func__);
347} 347}
348 348
349/************************************************************************** 349/**************************************************************************
@@ -367,7 +367,7 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event,
367 case IAP_LM_CONNECT_INDICATION: 367 case IAP_LM_CONNECT_INDICATION:
368 tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC); 368 tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
369 if (tx_skb == NULL) { 369 if (tx_skb == NULL) {
370 IRDA_WARNING("%s: unable to malloc!\n", __FUNCTION__); 370 IRDA_WARNING("%s: unable to malloc!\n", __func__);
371 return; 371 return;
372 } 372 }
373 373
@@ -386,7 +386,7 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event,
386 iriap_next_r_connect_state(self, R_RECEIVING); 386 iriap_next_r_connect_state(self, R_RECEIVING);
387 break; 387 break;
388 default: 388 default:
389 IRDA_DEBUG(0, "%s(), unknown event %d\n", __FUNCTION__, event); 389 IRDA_DEBUG(0, "%s(), unknown event %d\n", __func__, event);
390 break; 390 break;
391 } 391 }
392} 392}
@@ -397,7 +397,7 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event,
397static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event, 397static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event,
398 struct sk_buff *skb) 398 struct sk_buff *skb)
399{ 399{
400 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 400 IRDA_DEBUG(4, "%s()\n", __func__);
401 401
402 switch (event) { 402 switch (event) {
403 case IAP_LM_DISCONNECT_INDICATION: 403 case IAP_LM_DISCONNECT_INDICATION:
@@ -406,7 +406,7 @@ static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event,
406 iriap_next_r_connect_state(self, R_WAITING); 406 iriap_next_r_connect_state(self, R_WAITING);
407 break; 407 break;
408 default: 408 default:
409 IRDA_DEBUG(0, "%s(), unknown event!\n", __FUNCTION__); 409 IRDA_DEBUG(0, "%s(), unknown event!\n", __func__);
410 break; 410 break;
411 } 411 }
412} 412}
@@ -421,13 +421,13 @@ static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event,
421static void state_r_waiting(struct iriap_cb *self, IRIAP_EVENT event, 421static void state_r_waiting(struct iriap_cb *self, IRIAP_EVENT event,
422 struct sk_buff *skb) 422 struct sk_buff *skb)
423{ 423{
424 IRDA_DEBUG(0, "%s(), Not implemented\n", __FUNCTION__); 424 IRDA_DEBUG(0, "%s(), Not implemented\n", __func__);
425} 425}
426 426
427static void state_r_wait_active(struct iriap_cb *self, IRIAP_EVENT event, 427static void state_r_wait_active(struct iriap_cb *self, IRIAP_EVENT event,
428 struct sk_buff *skb) 428 struct sk_buff *skb)
429{ 429{
430 IRDA_DEBUG(0, "%s(), Not implemented\n", __FUNCTION__); 430 IRDA_DEBUG(0, "%s(), Not implemented\n", __func__);
431} 431}
432 432
433/* 433/*
@@ -439,7 +439,7 @@ static void state_r_wait_active(struct iriap_cb *self, IRIAP_EVENT event,
439static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event, 439static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event,
440 struct sk_buff *skb) 440 struct sk_buff *skb)
441{ 441{
442 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 442 IRDA_DEBUG(4, "%s()\n", __func__);
443 443
444 switch (event) { 444 switch (event) {
445 case IAP_RECV_F_LST: 445 case IAP_RECV_F_LST:
@@ -448,7 +448,7 @@ static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event,
448 iriap_call_indication(self, skb); 448 iriap_call_indication(self, skb);
449 break; 449 break;
450 default: 450 default:
451 IRDA_DEBUG(0, "%s(), unknown event!\n", __FUNCTION__); 451 IRDA_DEBUG(0, "%s(), unknown event!\n", __func__);
452 break; 452 break;
453 } 453 }
454} 454}
@@ -462,7 +462,7 @@ static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event,
462static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event, 462static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event,
463 struct sk_buff *skb) 463 struct sk_buff *skb)
464{ 464{
465 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 465 IRDA_DEBUG(4, "%s()\n", __func__);
466 466
467 IRDA_ASSERT(skb != NULL, return;); 467 IRDA_ASSERT(skb != NULL, return;);
468 IRDA_ASSERT(self != NULL, return;); 468 IRDA_ASSERT(self != NULL, return;);
@@ -483,7 +483,7 @@ static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event,
483 irlmp_data_request(self->lsap, skb); 483 irlmp_data_request(self->lsap, skb);
484 break; 484 break;
485 default: 485 default:
486 IRDA_DEBUG(0, "%s(), unknown event!\n", __FUNCTION__); 486 IRDA_DEBUG(0, "%s(), unknown event!\n", __func__);
487 break; 487 break;
488 } 488 }
489} 489}
@@ -491,7 +491,7 @@ static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event,
491static void state_r_returning(struct iriap_cb *self, IRIAP_EVENT event, 491static void state_r_returning(struct iriap_cb *self, IRIAP_EVENT event,
492 struct sk_buff *skb) 492 struct sk_buff *skb)
493{ 493{
494 IRDA_DEBUG(0, "%s(), event=%d\n", __FUNCTION__, event); 494 IRDA_DEBUG(0, "%s(), event=%d\n", __func__, event);
495 495
496 switch (event) { 496 switch (event) {
497 case IAP_RECV_F_LST: 497 case IAP_RECV_F_LST:
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index cbcf04380f3a..99ebb96f1386 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -47,12 +47,12 @@ struct ias_object *irias_new_object( char *name, int id)
47{ 47{
48 struct ias_object *obj; 48 struct ias_object *obj;
49 49
50 IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); 50 IRDA_DEBUG( 4, "%s()\n", __func__);
51 51
52 obj = kzalloc(sizeof(struct ias_object), GFP_ATOMIC); 52 obj = kzalloc(sizeof(struct ias_object), GFP_ATOMIC);
53 if (obj == NULL) { 53 if (obj == NULL) {
54 IRDA_WARNING("%s(), Unable to allocate object!\n", 54 IRDA_WARNING("%s(), Unable to allocate object!\n",
55 __FUNCTION__); 55 __func__);
56 return NULL; 56 return NULL;
57 } 57 }
58 58
@@ -60,7 +60,7 @@ struct ias_object *irias_new_object( char *name, int id)
60 obj->name = kstrndup(name, IAS_MAX_CLASSNAME, GFP_ATOMIC); 60 obj->name = kstrndup(name, IAS_MAX_CLASSNAME, GFP_ATOMIC);
61 if (!obj->name) { 61 if (!obj->name) {
62 IRDA_WARNING("%s(), Unable to allocate name!\n", 62 IRDA_WARNING("%s(), Unable to allocate name!\n",
63 __FUNCTION__); 63 __func__);
64 kfree(obj); 64 kfree(obj);
65 return NULL; 65 return NULL;
66 } 66 }
@@ -73,7 +73,7 @@ struct ias_object *irias_new_object( char *name, int id)
73 73
74 if (obj->attribs == NULL) { 74 if (obj->attribs == NULL) {
75 IRDA_WARNING("%s(), Unable to allocate attribs!\n", 75 IRDA_WARNING("%s(), Unable to allocate attribs!\n",
76 __FUNCTION__); 76 __func__);
77 kfree(obj->name); 77 kfree(obj->name);
78 kfree(obj); 78 kfree(obj);
79 return NULL; 79 return NULL;
@@ -134,7 +134,7 @@ int irias_delete_object(struct ias_object *obj)
134 node = hashbin_remove_this(irias_objects, (irda_queue_t *) obj); 134 node = hashbin_remove_this(irias_objects, (irda_queue_t *) obj);
135 if (!node) 135 if (!node)
136 IRDA_DEBUG( 0, "%s(), object already removed!\n", 136 IRDA_DEBUG( 0, "%s(), object already removed!\n",
137 __FUNCTION__); 137 __func__);
138 138
139 /* Destroy */ 139 /* Destroy */
140 __irias_delete_object(obj); 140 __irias_delete_object(obj);
@@ -268,7 +268,7 @@ int irias_object_change_attribute(char *obj_name, char *attrib_name,
268 /* Find object */ 268 /* Find object */
269 obj = hashbin_lock_find(irias_objects, 0, obj_name); 269 obj = hashbin_lock_find(irias_objects, 0, obj_name);
270 if (obj == NULL) { 270 if (obj == NULL) {
271 IRDA_WARNING("%s: Unable to find object: %s\n", __FUNCTION__, 271 IRDA_WARNING("%s: Unable to find object: %s\n", __func__,
272 obj_name); 272 obj_name);
273 return -1; 273 return -1;
274 } 274 }
@@ -280,14 +280,14 @@ int irias_object_change_attribute(char *obj_name, char *attrib_name,
280 attrib = hashbin_find(obj->attribs, 0, attrib_name); 280 attrib = hashbin_find(obj->attribs, 0, attrib_name);
281 if (attrib == NULL) { 281 if (attrib == NULL) {
282 IRDA_WARNING("%s: Unable to find attribute: %s\n", 282 IRDA_WARNING("%s: Unable to find attribute: %s\n",
283 __FUNCTION__, attrib_name); 283 __func__, attrib_name);
284 spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags); 284 spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags);
285 return -1; 285 return -1;
286 } 286 }
287 287
288 if ( attrib->value->type != new_value->type) { 288 if ( attrib->value->type != new_value->type) {
289 IRDA_DEBUG( 0, "%s(), changing value type not allowed!\n", 289 IRDA_DEBUG( 0, "%s(), changing value type not allowed!\n",
290 __FUNCTION__); 290 __func__);
291 spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags); 291 spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags);
292 return -1; 292 return -1;
293 } 293 }
@@ -322,7 +322,7 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value,
322 attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC); 322 attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
323 if (attrib == NULL) { 323 if (attrib == NULL) {
324 IRDA_WARNING("%s: Unable to allocate attribute!\n", 324 IRDA_WARNING("%s: Unable to allocate attribute!\n",
325 __FUNCTION__); 325 __func__);
326 return; 326 return;
327 } 327 }
328 328
@@ -333,7 +333,7 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value,
333 attrib->value = irias_new_integer_value(value); 333 attrib->value = irias_new_integer_value(value);
334 if (!attrib->name || !attrib->value) { 334 if (!attrib->name || !attrib->value) {
335 IRDA_WARNING("%s: Unable to allocate attribute!\n", 335 IRDA_WARNING("%s: Unable to allocate attribute!\n",
336 __FUNCTION__); 336 __func__);
337 if (attrib->value) 337 if (attrib->value)
338 irias_delete_value(attrib->value); 338 irias_delete_value(attrib->value);
339 kfree(attrib->name); 339 kfree(attrib->name);
@@ -366,7 +366,7 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets,
366 attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC); 366 attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
367 if (attrib == NULL) { 367 if (attrib == NULL) {
368 IRDA_WARNING("%s: Unable to allocate attribute!\n", 368 IRDA_WARNING("%s: Unable to allocate attribute!\n",
369 __FUNCTION__); 369 __func__);
370 return; 370 return;
371 } 371 }
372 372
@@ -376,7 +376,7 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets,
376 attrib->value = irias_new_octseq_value( octets, len); 376 attrib->value = irias_new_octseq_value( octets, len);
377 if (!attrib->name || !attrib->value) { 377 if (!attrib->name || !attrib->value) {
378 IRDA_WARNING("%s: Unable to allocate attribute!\n", 378 IRDA_WARNING("%s: Unable to allocate attribute!\n",
379 __FUNCTION__); 379 __func__);
380 if (attrib->value) 380 if (attrib->value)
381 irias_delete_value(attrib->value); 381 irias_delete_value(attrib->value);
382 kfree(attrib->name); 382 kfree(attrib->name);
@@ -408,7 +408,7 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value,
408 attrib = kzalloc(sizeof( struct ias_attrib), GFP_ATOMIC); 408 attrib = kzalloc(sizeof( struct ias_attrib), GFP_ATOMIC);
409 if (attrib == NULL) { 409 if (attrib == NULL) {
410 IRDA_WARNING("%s: Unable to allocate attribute!\n", 410 IRDA_WARNING("%s: Unable to allocate attribute!\n",
411 __FUNCTION__); 411 __func__);
412 return; 412 return;
413 } 413 }
414 414
@@ -418,7 +418,7 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value,
418 attrib->value = irias_new_string_value(value); 418 attrib->value = irias_new_string_value(value);
419 if (!attrib->name || !attrib->value) { 419 if (!attrib->name || !attrib->value) {
420 IRDA_WARNING("%s: Unable to allocate attribute!\n", 420 IRDA_WARNING("%s: Unable to allocate attribute!\n",
421 __FUNCTION__); 421 __func__);
422 if (attrib->value) 422 if (attrib->value)
423 irias_delete_value(attrib->value); 423 irias_delete_value(attrib->value);
424 kfree(attrib->name); 424 kfree(attrib->name);
@@ -442,7 +442,7 @@ struct ias_value *irias_new_integer_value(int integer)
442 442
443 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); 443 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
444 if (value == NULL) { 444 if (value == NULL) {
445 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 445 IRDA_WARNING("%s: Unable to kmalloc!\n", __func__);
446 return NULL; 446 return NULL;
447 } 447 }
448 448
@@ -467,7 +467,7 @@ struct ias_value *irias_new_string_value(char *string)
467 467
468 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); 468 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
469 if (value == NULL) { 469 if (value == NULL) {
470 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 470 IRDA_WARNING("%s: Unable to kmalloc!\n", __func__);
471 return NULL; 471 return NULL;
472 } 472 }
473 473
@@ -475,7 +475,7 @@ struct ias_value *irias_new_string_value(char *string)
475 value->charset = CS_ASCII; 475 value->charset = CS_ASCII;
476 value->t.string = kstrndup(string, IAS_MAX_STRING, GFP_ATOMIC); 476 value->t.string = kstrndup(string, IAS_MAX_STRING, GFP_ATOMIC);
477 if (!value->t.string) { 477 if (!value->t.string) {
478 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 478 IRDA_WARNING("%s: Unable to kmalloc!\n", __func__);
479 kfree(value); 479 kfree(value);
480 return NULL; 480 return NULL;
481 } 481 }
@@ -498,7 +498,7 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len)
498 498
499 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); 499 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
500 if (value == NULL) { 500 if (value == NULL) {
501 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 501 IRDA_WARNING("%s: Unable to kmalloc!\n", __func__);
502 return NULL; 502 return NULL;
503 } 503 }
504 504
@@ -510,7 +510,7 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len)
510 510
511 value->t.oct_seq = kmemdup(octseq, len, GFP_ATOMIC); 511 value->t.oct_seq = kmemdup(octseq, len, GFP_ATOMIC);
512 if (value->t.oct_seq == NULL){ 512 if (value->t.oct_seq == NULL){
513 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 513 IRDA_WARNING("%s: Unable to kmalloc!\n", __func__);
514 kfree(value); 514 kfree(value);
515 return NULL; 515 return NULL;
516 } 516 }
@@ -523,7 +523,7 @@ struct ias_value *irias_new_missing_value(void)
523 523
524 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); 524 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
525 if (value == NULL) { 525 if (value == NULL) {
526 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 526 IRDA_WARNING("%s: Unable to kmalloc!\n", __func__);
527 return NULL; 527 return NULL;
528 } 528 }
529 529
@@ -540,7 +540,7 @@ struct ias_value *irias_new_missing_value(void)
540 */ 540 */
541void irias_delete_value(struct ias_value *value) 541void irias_delete_value(struct ias_value *value)
542{ 542{
543 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 543 IRDA_DEBUG(4, "%s()\n", __func__);
544 544
545 IRDA_ASSERT(value != NULL, return;); 545 IRDA_ASSERT(value != NULL, return;);
546 546
@@ -558,7 +558,7 @@ void irias_delete_value(struct ias_value *value)
558 kfree(value->t.oct_seq); 558 kfree(value->t.oct_seq);
559 break; 559 break;
560 default: 560 default:
561 IRDA_DEBUG(0, "%s(), Unknown value type!\n", __FUNCTION__); 561 IRDA_DEBUG(0, "%s(), Unknown value type!\n", __func__);
562 break; 562 break;
563 } 563 }
564 kfree(value); 564 kfree(value);
diff --git a/net/irda/irlan/irlan_client.c b/net/irda/irlan/irlan_client.c
index fff52d57a200..6be1ec26b30c 100644
--- a/net/irda/irlan/irlan_client.c
+++ b/net/irda/irlan/irlan_client.c
@@ -72,7 +72,7 @@ static void irlan_client_kick_timer_expired(void *data)
72{ 72{
73 struct irlan_cb *self = (struct irlan_cb *) data; 73 struct irlan_cb *self = (struct irlan_cb *) data;
74 74
75 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 75 IRDA_DEBUG(2, "%s()\n", __func__ );
76 76
77 IRDA_ASSERT(self != NULL, return;); 77 IRDA_ASSERT(self != NULL, return;);
78 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 78 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -91,7 +91,7 @@ static void irlan_client_kick_timer_expired(void *data)
91 91
92static void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout) 92static void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout)
93{ 93{
94 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 94 IRDA_DEBUG(4, "%s()\n", __func__ );
95 95
96 irda_start_timer(&self->client.kick_timer, timeout, (void *) self, 96 irda_start_timer(&self->client.kick_timer, timeout, (void *) self,
97 irlan_client_kick_timer_expired); 97 irlan_client_kick_timer_expired);
@@ -105,7 +105,7 @@ static void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout)
105 */ 105 */
106void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr) 106void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr)
107{ 107{
108 IRDA_DEBUG(1, "%s()\n", __FUNCTION__ ); 108 IRDA_DEBUG(1, "%s()\n", __func__ );
109 109
110 IRDA_ASSERT(self != NULL, return;); 110 IRDA_ASSERT(self != NULL, return;);
111 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 111 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -117,7 +117,7 @@ void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr)
117 if ((self->client.state != IRLAN_IDLE) || 117 if ((self->client.state != IRLAN_IDLE) ||
118 (self->provider.access_type == ACCESS_DIRECT)) 118 (self->provider.access_type == ACCESS_DIRECT))
119 { 119 {
120 IRDA_DEBUG(0, "%s(), already awake!\n", __FUNCTION__ ); 120 IRDA_DEBUG(0, "%s(), already awake!\n", __func__ );
121 return; 121 return;
122 } 122 }
123 123
@@ -126,7 +126,7 @@ void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr)
126 self->daddr = daddr; 126 self->daddr = daddr;
127 127
128 if (self->disconnect_reason == LM_USER_REQUEST) { 128 if (self->disconnect_reason == LM_USER_REQUEST) {
129 IRDA_DEBUG(0, "%s(), still stopped by user\n", __FUNCTION__ ); 129 IRDA_DEBUG(0, "%s(), still stopped by user\n", __func__ );
130 return; 130 return;
131 } 131 }
132 132
@@ -153,7 +153,7 @@ void irlan_client_discovery_indication(discinfo_t *discovery,
153 struct irlan_cb *self; 153 struct irlan_cb *self;
154 __u32 saddr, daddr; 154 __u32 saddr, daddr;
155 155
156 IRDA_DEBUG(1, "%s()\n", __FUNCTION__ ); 156 IRDA_DEBUG(1, "%s()\n", __func__ );
157 157
158 IRDA_ASSERT(discovery != NULL, return;); 158 IRDA_ASSERT(discovery != NULL, return;);
159 159
@@ -175,7 +175,7 @@ void irlan_client_discovery_indication(discinfo_t *discovery,
175 if (self) { 175 if (self) {
176 IRDA_ASSERT(self->magic == IRLAN_MAGIC, goto out;); 176 IRDA_ASSERT(self->magic == IRLAN_MAGIC, goto out;);
177 177
178 IRDA_DEBUG(1, "%s(), Found instance (%08x)!\n", __FUNCTION__ , 178 IRDA_DEBUG(1, "%s(), Found instance (%08x)!\n", __func__ ,
179 daddr); 179 daddr);
180 180
181 irlan_client_wakeup(self, saddr, daddr); 181 irlan_client_wakeup(self, saddr, daddr);
@@ -195,7 +195,7 @@ static int irlan_client_ctrl_data_indication(void *instance, void *sap,
195{ 195{
196 struct irlan_cb *self; 196 struct irlan_cb *self;
197 197
198 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 198 IRDA_DEBUG(2, "%s()\n", __func__ );
199 199
200 self = (struct irlan_cb *) instance; 200 self = (struct irlan_cb *) instance;
201 201
@@ -206,7 +206,7 @@ static int irlan_client_ctrl_data_indication(void *instance, void *sap,
206 irlan_do_client_event(self, IRLAN_DATA_INDICATION, skb); 206 irlan_do_client_event(self, IRLAN_DATA_INDICATION, skb);
207 207
208 /* Ready for a new command */ 208 /* Ready for a new command */
209 IRDA_DEBUG(2, "%s(), clearing tx_busy\n", __FUNCTION__ ); 209 IRDA_DEBUG(2, "%s(), clearing tx_busy\n", __func__ );
210 self->client.tx_busy = FALSE; 210 self->client.tx_busy = FALSE;
211 211
212 /* Check if we have some queued commands waiting to be sent */ 212 /* Check if we have some queued commands waiting to be sent */
@@ -223,7 +223,7 @@ static void irlan_client_ctrl_disconnect_indication(void *instance, void *sap,
223 struct tsap_cb *tsap; 223 struct tsap_cb *tsap;
224 struct sk_buff *skb; 224 struct sk_buff *skb;
225 225
226 IRDA_DEBUG(4, "%s(), reason=%d\n", __FUNCTION__ , reason); 226 IRDA_DEBUG(4, "%s(), reason=%d\n", __func__ , reason);
227 227
228 self = (struct irlan_cb *) instance; 228 self = (struct irlan_cb *) instance;
229 tsap = (struct tsap_cb *) sap; 229 tsap = (struct tsap_cb *) sap;
@@ -255,7 +255,7 @@ static void irlan_client_open_ctrl_tsap(struct irlan_cb *self)
255 struct tsap_cb *tsap; 255 struct tsap_cb *tsap;
256 notify_t notify; 256 notify_t notify;
257 257
258 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 258 IRDA_DEBUG(4, "%s()\n", __func__ );
259 259
260 IRDA_ASSERT(self != NULL, return;); 260 IRDA_ASSERT(self != NULL, return;);
261 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 261 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -275,7 +275,7 @@ static void irlan_client_open_ctrl_tsap(struct irlan_cb *self)
275 275
276 tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify); 276 tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify);
277 if (!tsap) { 277 if (!tsap) {
278 IRDA_DEBUG(2, "%s(), Got no tsap!\n", __FUNCTION__ ); 278 IRDA_DEBUG(2, "%s(), Got no tsap!\n", __func__ );
279 return; 279 return;
280 } 280 }
281 self->client.tsap_ctrl = tsap; 281 self->client.tsap_ctrl = tsap;
@@ -295,7 +295,7 @@ static void irlan_client_ctrl_connect_confirm(void *instance, void *sap,
295{ 295{
296 struct irlan_cb *self; 296 struct irlan_cb *self;
297 297
298 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 298 IRDA_DEBUG(4, "%s()\n", __func__ );
299 299
300 self = (struct irlan_cb *) instance; 300 self = (struct irlan_cb *) instance;
301 301
@@ -374,13 +374,13 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb)
374 374
375 IRDA_ASSERT(skb != NULL, return;); 375 IRDA_ASSERT(skb != NULL, return;);
376 376
377 IRDA_DEBUG(4, "%s() skb->len=%d\n", __FUNCTION__ , (int) skb->len); 377 IRDA_DEBUG(4, "%s() skb->len=%d\n", __func__ , (int) skb->len);
378 378
379 IRDA_ASSERT(self != NULL, return;); 379 IRDA_ASSERT(self != NULL, return;);
380 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 380 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
381 381
382 if (!skb) { 382 if (!skb) {
383 IRDA_ERROR("%s(), Got NULL skb!\n", __FUNCTION__); 383 IRDA_ERROR("%s(), Got NULL skb!\n", __func__);
384 return; 384 return;
385 } 385 }
386 frame = skb->data; 386 frame = skb->data;
@@ -405,7 +405,7 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb)
405 /* How many parameters? */ 405 /* How many parameters? */
406 count = frame[1]; 406 count = frame[1];
407 407
408 IRDA_DEBUG(4, "%s(), got %d parameters\n", __FUNCTION__ , count); 408 IRDA_DEBUG(4, "%s(), got %d parameters\n", __func__ , count);
409 409
410 ptr = frame+2; 410 ptr = frame+2;
411 411
@@ -413,7 +413,7 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb)
413 for (i=0; i<count;i++) { 413 for (i=0; i<count;i++) {
414 ret = irlan_extract_param(ptr, name, value, &val_len); 414 ret = irlan_extract_param(ptr, name, value, &val_len);
415 if (ret < 0) { 415 if (ret < 0) {
416 IRDA_DEBUG(2, "%s(), IrLAN, Error!\n", __FUNCTION__ ); 416 IRDA_DEBUG(2, "%s(), IrLAN, Error!\n", __func__ );
417 break; 417 break;
418 } 418 }
419 ptr += ret; 419 ptr += ret;
@@ -438,7 +438,7 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param,
438 int i; 438 int i;
439 DECLARE_MAC_BUF(mac); 439 DECLARE_MAC_BUF(mac);
440 440
441 IRDA_DEBUG(4, "%s(), parm=%s\n", __FUNCTION__ , param); 441 IRDA_DEBUG(4, "%s(), parm=%s\n", __func__ , param);
442 442
443 IRDA_ASSERT(self != NULL, return;); 443 IRDA_ASSERT(self != NULL, return;);
444 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 444 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -476,7 +476,7 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param,
476 else if (strcmp(value, "HOSTED") == 0) 476 else if (strcmp(value, "HOSTED") == 0)
477 self->client.access_type = ACCESS_HOSTED; 477 self->client.access_type = ACCESS_HOSTED;
478 else { 478 else {
479 IRDA_DEBUG(2, "%s(), unknown access type!\n", __FUNCTION__ ); 479 IRDA_DEBUG(2, "%s(), unknown access type!\n", __func__ );
480 } 480 }
481 } 481 }
482 /* IRLAN version */ 482 /* IRLAN version */
@@ -498,14 +498,14 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param,
498 memcpy(&tmp_cpu, value, 2); /* Align value */ 498 memcpy(&tmp_cpu, value, 2); /* Align value */
499 le16_to_cpus(&tmp_cpu); /* Convert to host order */ 499 le16_to_cpus(&tmp_cpu); /* Convert to host order */
500 self->client.recv_arb_val = tmp_cpu; 500 self->client.recv_arb_val = tmp_cpu;
501 IRDA_DEBUG(2, "%s(), receive arb val=%d\n", __FUNCTION__ , 501 IRDA_DEBUG(2, "%s(), receive arb val=%d\n", __func__ ,
502 self->client.recv_arb_val); 502 self->client.recv_arb_val);
503 } 503 }
504 if (strcmp(param, "MAX_FRAME") == 0) { 504 if (strcmp(param, "MAX_FRAME") == 0) {
505 memcpy(&tmp_cpu, value, 2); /* Align value */ 505 memcpy(&tmp_cpu, value, 2); /* Align value */
506 le16_to_cpus(&tmp_cpu); /* Convert to host order */ 506 le16_to_cpus(&tmp_cpu); /* Convert to host order */
507 self->client.max_frame = tmp_cpu; 507 self->client.max_frame = tmp_cpu;
508 IRDA_DEBUG(4, "%s(), max frame=%d\n", __FUNCTION__ , 508 IRDA_DEBUG(4, "%s(), max frame=%d\n", __func__ ,
509 self->client.max_frame); 509 self->client.max_frame);
510 } 510 }
511 511
@@ -539,7 +539,7 @@ void irlan_client_get_value_confirm(int result, __u16 obj_id,
539{ 539{
540 struct irlan_cb *self; 540 struct irlan_cb *self;
541 541
542 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 542 IRDA_DEBUG(4, "%s()\n", __func__ );
543 543
544 IRDA_ASSERT(priv != NULL, return;); 544 IRDA_ASSERT(priv != NULL, return;);
545 545
@@ -552,7 +552,7 @@ void irlan_client_get_value_confirm(int result, __u16 obj_id,
552 552
553 /* Check if request succeeded */ 553 /* Check if request succeeded */
554 if (result != IAS_SUCCESS) { 554 if (result != IAS_SUCCESS) {
555 IRDA_DEBUG(2, "%s(), got NULL value!\n", __FUNCTION__ ); 555 IRDA_DEBUG(2, "%s(), got NULL value!\n", __func__ );
556 irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL, 556 irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL,
557 NULL); 557 NULL);
558 return; 558 return;
@@ -570,7 +570,7 @@ void irlan_client_get_value_confirm(int result, __u16 obj_id,
570 irias_delete_value(value); 570 irias_delete_value(value);
571 break; 571 break;
572 default: 572 default:
573 IRDA_DEBUG(2, "%s(), unknown type!\n", __FUNCTION__ ); 573 IRDA_DEBUG(2, "%s(), unknown type!\n", __func__ );
574 break; 574 break;
575 } 575 }
576 irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL, NULL); 576 irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL, NULL);
diff --git a/net/irda/irlan/irlan_client_event.c b/net/irda/irlan/irlan_client_event.c
index 6afcee59e906..8d5a8ebc444f 100644
--- a/net/irda/irlan/irlan_client_event.c
+++ b/net/irda/irlan/irlan_client_event.c
@@ -92,7 +92,7 @@ void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event,
92static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event, 92static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
93 struct sk_buff *skb) 93 struct sk_buff *skb)
94{ 94{
95 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 95 IRDA_DEBUG(4, "%s()\n", __func__ );
96 96
97 IRDA_ASSERT(self != NULL, return -1;); 97 IRDA_ASSERT(self != NULL, return -1;);
98 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); 98 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
@@ -101,7 +101,7 @@ static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
101 case IRLAN_DISCOVERY_INDICATION: 101 case IRLAN_DISCOVERY_INDICATION:
102 if (self->client.iriap) { 102 if (self->client.iriap) {
103 IRDA_WARNING("%s(), busy with a previous query\n", 103 IRDA_WARNING("%s(), busy with a previous query\n",
104 __FUNCTION__); 104 __func__);
105 return -EBUSY; 105 return -EBUSY;
106 } 106 }
107 107
@@ -114,10 +114,10 @@ static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
114 "IrLAN", "IrDA:TinyTP:LsapSel"); 114 "IrLAN", "IrDA:TinyTP:LsapSel");
115 break; 115 break;
116 case IRLAN_WATCHDOG_TIMEOUT: 116 case IRLAN_WATCHDOG_TIMEOUT:
117 IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__ ); 117 IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ );
118 break; 118 break;
119 default: 119 default:
120 IRDA_DEBUG(4, "%s(), Unknown event %d\n", __FUNCTION__ , event); 120 IRDA_DEBUG(4, "%s(), Unknown event %d\n", __func__ , event);
121 break; 121 break;
122 } 122 }
123 if (skb) 123 if (skb)
@@ -136,7 +136,7 @@ static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
136static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, 136static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
137 struct sk_buff *skb) 137 struct sk_buff *skb)
138{ 138{
139 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 139 IRDA_DEBUG(4, "%s()\n", __func__ );
140 140
141 IRDA_ASSERT(self != NULL, return -1;); 141 IRDA_ASSERT(self != NULL, return -1;);
142 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); 142 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
@@ -154,7 +154,7 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
154 irlan_next_client_state(self, IRLAN_CONN); 154 irlan_next_client_state(self, IRLAN_CONN);
155 break; 155 break;
156 case IRLAN_IAS_PROVIDER_NOT_AVAIL: 156 case IRLAN_IAS_PROVIDER_NOT_AVAIL:
157 IRDA_DEBUG(2, "%s(), IAS_PROVIDER_NOT_AVAIL\n", __FUNCTION__ ); 157 IRDA_DEBUG(2, "%s(), IAS_PROVIDER_NOT_AVAIL\n", __func__ );
158 irlan_next_client_state(self, IRLAN_IDLE); 158 irlan_next_client_state(self, IRLAN_IDLE);
159 159
160 /* Give the client a kick! */ 160 /* Give the client a kick! */
@@ -167,10 +167,10 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
167 irlan_next_client_state(self, IRLAN_IDLE); 167 irlan_next_client_state(self, IRLAN_IDLE);
168 break; 168 break;
169 case IRLAN_WATCHDOG_TIMEOUT: 169 case IRLAN_WATCHDOG_TIMEOUT:
170 IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__ ); 170 IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ );
171 break; 171 break;
172 default: 172 default:
173 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event); 173 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event);
174 break; 174 break;
175 } 175 }
176 if (skb) 176 if (skb)
@@ -189,7 +189,7 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
189static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event, 189static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event,
190 struct sk_buff *skb) 190 struct sk_buff *skb)
191{ 191{
192 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 192 IRDA_DEBUG(4, "%s()\n", __func__ );
193 193
194 IRDA_ASSERT(self != NULL, return -1;); 194 IRDA_ASSERT(self != NULL, return -1;);
195 195
@@ -204,10 +204,10 @@ static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event,
204 irlan_next_client_state(self, IRLAN_IDLE); 204 irlan_next_client_state(self, IRLAN_IDLE);
205 break; 205 break;
206 case IRLAN_WATCHDOG_TIMEOUT: 206 case IRLAN_WATCHDOG_TIMEOUT:
207 IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__ ); 207 IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ );
208 break; 208 break;
209 default: 209 default:
210 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event); 210 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event);
211 break; 211 break;
212 } 212 }
213 if (skb) 213 if (skb)
@@ -224,7 +224,7 @@ static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event,
224static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event, 224static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event,
225 struct sk_buff *skb) 225 struct sk_buff *skb)
226{ 226{
227 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 227 IRDA_DEBUG(4, "%s()\n", __func__ );
228 228
229 IRDA_ASSERT(self != NULL, return -1;); 229 IRDA_ASSERT(self != NULL, return -1;);
230 230
@@ -244,10 +244,10 @@ static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event,
244 irlan_next_client_state(self, IRLAN_IDLE); 244 irlan_next_client_state(self, IRLAN_IDLE);
245 break; 245 break;
246 case IRLAN_WATCHDOG_TIMEOUT: 246 case IRLAN_WATCHDOG_TIMEOUT:
247 IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__ ); 247 IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ );
248 break; 248 break;
249 default: 249 default:
250 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event); 250 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event);
251 break; 251 break;
252 } 252 }
253 if (skb) 253 if (skb)
@@ -266,7 +266,7 @@ static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event,
266static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event, 266static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event,
267 struct sk_buff *skb) 267 struct sk_buff *skb)
268{ 268{
269 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 269 IRDA_DEBUG(4, "%s()\n", __func__ );
270 270
271 IRDA_ASSERT(self != NULL, return -1;); 271 IRDA_ASSERT(self != NULL, return -1;);
272 272
@@ -281,10 +281,10 @@ static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event,
281 irlan_next_client_state(self, IRLAN_IDLE); 281 irlan_next_client_state(self, IRLAN_IDLE);
282 break; 282 break;
283 case IRLAN_WATCHDOG_TIMEOUT: 283 case IRLAN_WATCHDOG_TIMEOUT:
284 IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__ ); 284 IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ );
285 break; 285 break;
286 default: 286 default:
287 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event); 287 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event);
288 break; 288 break;
289 } 289 }
290 if (skb) 290 if (skb)
@@ -305,7 +305,7 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event,
305{ 305{
306 struct qos_info qos; 306 struct qos_info qos;
307 307
308 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 308 IRDA_DEBUG(4, "%s()\n", __func__ );
309 309
310 IRDA_ASSERT(self != NULL, return -1;); 310 IRDA_ASSERT(self != NULL, return -1;);
311 311
@@ -344,7 +344,7 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event,
344 irlan_next_client_state(self, IRLAN_DATA); 344 irlan_next_client_state(self, IRLAN_DATA);
345 break; 345 break;
346 default: 346 default:
347 IRDA_DEBUG(2, "%s(), unknown access type!\n", __FUNCTION__ ); 347 IRDA_DEBUG(2, "%s(), unknown access type!\n", __func__ );
348 break; 348 break;
349 } 349 }
350 break; 350 break;
@@ -353,10 +353,10 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event,
353 irlan_next_client_state(self, IRLAN_IDLE); 353 irlan_next_client_state(self, IRLAN_IDLE);
354 break; 354 break;
355 case IRLAN_WATCHDOG_TIMEOUT: 355 case IRLAN_WATCHDOG_TIMEOUT:
356 IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__ ); 356 IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ );
357 break; 357 break;
358 default: 358 default:
359 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event); 359 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event);
360 break; 360 break;
361 } 361 }
362 362
@@ -376,7 +376,7 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event,
376static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event, 376static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event,
377 struct sk_buff *skb) 377 struct sk_buff *skb)
378{ 378{
379 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 379 IRDA_DEBUG(4, "%s()\n", __func__ );
380 380
381 IRDA_ASSERT(self != NULL, return -1;); 381 IRDA_ASSERT(self != NULL, return -1;);
382 382
@@ -390,10 +390,10 @@ static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event,
390 irlan_next_client_state(self, IRLAN_IDLE); 390 irlan_next_client_state(self, IRLAN_IDLE);
391 break; 391 break;
392 case IRLAN_WATCHDOG_TIMEOUT: 392 case IRLAN_WATCHDOG_TIMEOUT:
393 IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__ ); 393 IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ );
394 break; 394 break;
395 default: 395 default:
396 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event); 396 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event);
397 break; 397 break;
398 } 398 }
399 if (skb) 399 if (skb)
@@ -407,7 +407,7 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event,
407{ 407{
408 struct qos_info qos; 408 struct qos_info qos;
409 409
410 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 410 IRDA_DEBUG(2, "%s()\n", __func__ );
411 411
412 IRDA_ASSERT(self != NULL, return -1;); 412 IRDA_ASSERT(self != NULL, return -1;);
413 413
@@ -429,7 +429,7 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event,
429 } else if (self->client.recv_arb_val > 429 } else if (self->client.recv_arb_val >
430 self->provider.send_arb_val) 430 self->provider.send_arb_val)
431 { 431 {
432 IRDA_DEBUG(2, "%s(), lost the battle :-(\n", __FUNCTION__ ); 432 IRDA_DEBUG(2, "%s(), lost the battle :-(\n", __func__ );
433 } 433 }
434 break; 434 break;
435 case IRLAN_DATA_CONNECT_INDICATION: 435 case IRLAN_DATA_CONNECT_INDICATION:
@@ -440,10 +440,10 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event,
440 irlan_next_client_state(self, IRLAN_IDLE); 440 irlan_next_client_state(self, IRLAN_IDLE);
441 break; 441 break;
442 case IRLAN_WATCHDOG_TIMEOUT: 442 case IRLAN_WATCHDOG_TIMEOUT:
443 IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__ ); 443 IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ );
444 break; 444 break;
445 default: 445 default:
446 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event); 446 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event);
447 break; 447 break;
448 } 448 }
449 if (skb) 449 if (skb)
@@ -462,7 +462,7 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event,
462static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event, 462static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event,
463 struct sk_buff *skb) 463 struct sk_buff *skb)
464{ 464{
465 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 465 IRDA_DEBUG(4, "%s()\n", __func__ );
466 466
467 IRDA_ASSERT(self != NULL, return -1;); 467 IRDA_ASSERT(self != NULL, return -1;);
468 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); 468 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
@@ -476,7 +476,7 @@ static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event,
476 irlan_next_client_state(self, IRLAN_IDLE); 476 irlan_next_client_state(self, IRLAN_IDLE);
477 break; 477 break;
478 default: 478 default:
479 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event); 479 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event);
480 break; 480 break;
481 } 481 }
482 if (skb) 482 if (skb)
@@ -494,7 +494,7 @@ static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event,
494static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event, 494static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event,
495 struct sk_buff *skb) 495 struct sk_buff *skb)
496{ 496{
497 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 497 IRDA_DEBUG(2, "%s()\n", __func__ );
498 498
499 if (skb) 499 if (skb)
500 dev_kfree_skb(skb); 500 dev_kfree_skb(skb);
@@ -511,7 +511,7 @@ static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event,
511static int irlan_client_state_sync(struct irlan_cb *self, IRLAN_EVENT event, 511static int irlan_client_state_sync(struct irlan_cb *self, IRLAN_EVENT event,
512 struct sk_buff *skb) 512 struct sk_buff *skb)
513{ 513{
514 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 514 IRDA_DEBUG(2, "%s()\n", __func__ );
515 515
516 if (skb) 516 if (skb)
517 dev_kfree_skb(skb); 517 dev_kfree_skb(skb);
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 1eb4bbcb1c9e..9a1cd87e7142 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -54,13 +54,6 @@
54#include <net/irda/irlan_filter.h> 54#include <net/irda/irlan_filter.h>
55 55
56 56
57/*
58 * Send gratuitous ARP when connected to a new AP or not. May be a clever
59 * thing to do, but for some reason the machine crashes if you use DHCP. So
60 * lets not use it by default.
61 */
62#undef CONFIG_IRLAN_SEND_GRATUITOUS_ARP
63
64/* extern char sysctl_devname[]; */ 57/* extern char sysctl_devname[]; */
65 58
66/* 59/*
@@ -124,7 +117,7 @@ static int __init irlan_init(void)
124 struct irlan_cb *new; 117 struct irlan_cb *new;
125 __u16 hints; 118 __u16 hints;
126 119
127 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 120 IRDA_DEBUG(2, "%s()\n", __func__ );
128 121
129#ifdef CONFIG_PROC_FS 122#ifdef CONFIG_PROC_FS
130 { struct proc_dir_entry *proc; 123 { struct proc_dir_entry *proc;
@@ -136,7 +129,7 @@ static int __init irlan_init(void)
136 } 129 }
137#endif /* CONFIG_PROC_FS */ 130#endif /* CONFIG_PROC_FS */
138 131
139 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 132 IRDA_DEBUG(4, "%s()\n", __func__ );
140 hints = irlmp_service_to_hint(S_LAN); 133 hints = irlmp_service_to_hint(S_LAN);
141 134
142 /* Register with IrLMP as a client */ 135 /* Register with IrLMP as a client */
@@ -179,7 +172,7 @@ static void __exit irlan_cleanup(void)
179{ 172{
180 struct irlan_cb *self, *next; 173 struct irlan_cb *self, *next;
181 174
182 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 175 IRDA_DEBUG(4, "%s()\n", __func__ );
183 176
184 irlmp_unregister_client(ckey); 177 irlmp_unregister_client(ckey);
185 irlmp_unregister_service(skey); 178 irlmp_unregister_service(skey);
@@ -207,7 +200,7 @@ static struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr)
207 struct net_device *dev; 200 struct net_device *dev;
208 struct irlan_cb *self; 201 struct irlan_cb *self;
209 202
210 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 203 IRDA_DEBUG(2, "%s()\n", __func__ );
211 204
212 /* Create network device with irlan */ 205 /* Create network device with irlan */
213 dev = alloc_irlandev(eth ? "eth%d" : "irlan%d"); 206 dev = alloc_irlandev(eth ? "eth%d" : "irlan%d");
@@ -252,7 +245,7 @@ static struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr)
252 245
253 if (register_netdev(dev)) { 246 if (register_netdev(dev)) {
254 IRDA_DEBUG(2, "%s(), register_netdev() failed!\n", 247 IRDA_DEBUG(2, "%s(), register_netdev() failed!\n",
255 __FUNCTION__ ); 248 __func__ );
256 self = NULL; 249 self = NULL;
257 free_netdev(dev); 250 free_netdev(dev);
258 } else { 251 } else {
@@ -272,7 +265,7 @@ static struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr)
272 */ 265 */
273static void __irlan_close(struct irlan_cb *self) 266static void __irlan_close(struct irlan_cb *self)
274{ 267{
275 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 268 IRDA_DEBUG(2, "%s()\n", __func__ );
276 269
277 ASSERT_RTNL(); 270 ASSERT_RTNL();
278 IRDA_ASSERT(self != NULL, return;); 271 IRDA_ASSERT(self != NULL, return;);
@@ -320,7 +313,7 @@ static void irlan_connect_indication(void *instance, void *sap,
320 struct irlan_cb *self; 313 struct irlan_cb *self;
321 struct tsap_cb *tsap; 314 struct tsap_cb *tsap;
322 315
323 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 316 IRDA_DEBUG(2, "%s()\n", __func__ );
324 317
325 self = (struct irlan_cb *) instance; 318 self = (struct irlan_cb *) instance;
326 tsap = (struct tsap_cb *) sap; 319 tsap = (struct tsap_cb *) sap;
@@ -332,7 +325,7 @@ static void irlan_connect_indication(void *instance, void *sap,
332 self->max_sdu_size = max_sdu_size; 325 self->max_sdu_size = max_sdu_size;
333 self->max_header_size = max_header_size; 326 self->max_header_size = max_header_size;
334 327
335 IRDA_DEBUG(0, "%s: We are now connected!\n", __FUNCTION__); 328 IRDA_DEBUG(0, "%s: We are now connected!\n", __func__);
336 329
337 del_timer(&self->watchdog_timer); 330 del_timer(&self->watchdog_timer);
338 331
@@ -376,7 +369,7 @@ static void irlan_connect_confirm(void *instance, void *sap,
376 369
377 /* TODO: we could set the MTU depending on the max_sdu_size */ 370 /* TODO: we could set the MTU depending on the max_sdu_size */
378 371
379 IRDA_DEBUG(0, "%s: We are now connected!\n", __FUNCTION__); 372 IRDA_DEBUG(0, "%s: We are now connected!\n", __func__);
380 del_timer(&self->watchdog_timer); 373 del_timer(&self->watchdog_timer);
381 374
382 /* 375 /*
@@ -393,9 +386,6 @@ static void irlan_connect_confirm(void *instance, void *sap,
393 /* Ready to transfer Ethernet frames */ 386 /* Ready to transfer Ethernet frames */
394 netif_start_queue(self->dev); 387 netif_start_queue(self->dev);
395 self->disconnect_reason = 0; /* Clear reason */ 388 self->disconnect_reason = 0; /* Clear reason */
396#ifdef CONFIG_IRLAN_SEND_GRATUITOUS_ARP
397 irlan_eth_send_gratuitous_arp(&self->dev);
398#endif
399 wake_up_interruptible(&self->open_wait); 389 wake_up_interruptible(&self->open_wait);
400} 390}
401 391
@@ -412,7 +402,7 @@ static void irlan_disconnect_indication(void *instance,
412 struct irlan_cb *self; 402 struct irlan_cb *self;
413 struct tsap_cb *tsap; 403 struct tsap_cb *tsap;
414 404
415 IRDA_DEBUG(0, "%s(), reason=%d\n", __FUNCTION__ , reason); 405 IRDA_DEBUG(0, "%s(), reason=%d\n", __func__ , reason);
416 406
417 self = (struct irlan_cb *) instance; 407 self = (struct irlan_cb *) instance;
418 tsap = (struct tsap_cb *) sap; 408 tsap = (struct tsap_cb *) sap;
@@ -431,22 +421,22 @@ static void irlan_disconnect_indication(void *instance,
431 421
432 switch (reason) { 422 switch (reason) {
433 case LM_USER_REQUEST: /* User request */ 423 case LM_USER_REQUEST: /* User request */
434 IRDA_DEBUG(2, "%s(), User requested\n", __FUNCTION__ ); 424 IRDA_DEBUG(2, "%s(), User requested\n", __func__ );
435 break; 425 break;
436 case LM_LAP_DISCONNECT: /* Unexpected IrLAP disconnect */ 426 case LM_LAP_DISCONNECT: /* Unexpected IrLAP disconnect */
437 IRDA_DEBUG(2, "%s(), Unexpected IrLAP disconnect\n", __FUNCTION__ ); 427 IRDA_DEBUG(2, "%s(), Unexpected IrLAP disconnect\n", __func__ );
438 break; 428 break;
439 case LM_CONNECT_FAILURE: /* Failed to establish IrLAP connection */ 429 case LM_CONNECT_FAILURE: /* Failed to establish IrLAP connection */
440 IRDA_DEBUG(2, "%s(), IrLAP connect failed\n", __FUNCTION__ ); 430 IRDA_DEBUG(2, "%s(), IrLAP connect failed\n", __func__ );
441 break; 431 break;
442 case LM_LAP_RESET: /* IrLAP reset */ 432 case LM_LAP_RESET: /* IrLAP reset */
443 IRDA_DEBUG(2, "%s(), IrLAP reset\n", __FUNCTION__ ); 433 IRDA_DEBUG(2, "%s(), IrLAP reset\n", __func__ );
444 break; 434 break;
445 case LM_INIT_DISCONNECT: 435 case LM_INIT_DISCONNECT:
446 IRDA_DEBUG(2, "%s(), IrLMP connect failed\n", __FUNCTION__ ); 436 IRDA_DEBUG(2, "%s(), IrLMP connect failed\n", __func__ );
447 break; 437 break;
448 default: 438 default:
449 IRDA_ERROR("%s(), Unknown disconnect reason\n", __FUNCTION__); 439 IRDA_ERROR("%s(), Unknown disconnect reason\n", __func__);
450 break; 440 break;
451 } 441 }
452 442
@@ -468,7 +458,7 @@ void irlan_open_data_tsap(struct irlan_cb *self)
468 struct tsap_cb *tsap; 458 struct tsap_cb *tsap;
469 notify_t notify; 459 notify_t notify;
470 460
471 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 461 IRDA_DEBUG(2, "%s()\n", __func__ );
472 462
473 IRDA_ASSERT(self != NULL, return;); 463 IRDA_ASSERT(self != NULL, return;);
474 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 464 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -490,7 +480,7 @@ void irlan_open_data_tsap(struct irlan_cb *self)
490 480
491 tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify); 481 tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify);
492 if (!tsap) { 482 if (!tsap) {
493 IRDA_DEBUG(2, "%s(), Got no tsap!\n", __FUNCTION__ ); 483 IRDA_DEBUG(2, "%s(), Got no tsap!\n", __func__ );
494 return; 484 return;
495 } 485 }
496 self->tsap_data = tsap; 486 self->tsap_data = tsap;
@@ -504,7 +494,7 @@ void irlan_open_data_tsap(struct irlan_cb *self)
504 494
505void irlan_close_tsaps(struct irlan_cb *self) 495void irlan_close_tsaps(struct irlan_cb *self)
506{ 496{
507 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 497 IRDA_DEBUG(4, "%s()\n", __func__ );
508 498
509 IRDA_ASSERT(self != NULL, return;); 499 IRDA_ASSERT(self != NULL, return;);
510 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 500 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -594,7 +584,7 @@ int irlan_run_ctrl_tx_queue(struct irlan_cb *self)
594{ 584{
595 struct sk_buff *skb; 585 struct sk_buff *skb;
596 586
597 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 587 IRDA_DEBUG(2, "%s()\n", __func__ );
598 588
599 if (irda_lock(&self->client.tx_busy) == FALSE) 589 if (irda_lock(&self->client.tx_busy) == FALSE)
600 return -EBUSY; 590 return -EBUSY;
@@ -613,7 +603,7 @@ int irlan_run_ctrl_tx_queue(struct irlan_cb *self)
613 dev_kfree_skb(skb); 603 dev_kfree_skb(skb);
614 return -1; 604 return -1;
615 } 605 }
616 IRDA_DEBUG(2, "%s(), sending ...\n", __FUNCTION__ ); 606 IRDA_DEBUG(2, "%s(), sending ...\n", __func__ );
617 607
618 return irttp_data_request(self->client.tsap_ctrl, skb); 608 return irttp_data_request(self->client.tsap_ctrl, skb);
619} 609}
@@ -626,7 +616,7 @@ int irlan_run_ctrl_tx_queue(struct irlan_cb *self)
626 */ 616 */
627static void irlan_ctrl_data_request(struct irlan_cb *self, struct sk_buff *skb) 617static void irlan_ctrl_data_request(struct irlan_cb *self, struct sk_buff *skb)
628{ 618{
629 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 619 IRDA_DEBUG(2, "%s()\n", __func__ );
630 620
631 /* Queue command */ 621 /* Queue command */
632 skb_queue_tail(&self->client.txq, skb); 622 skb_queue_tail(&self->client.txq, skb);
@@ -646,7 +636,7 @@ void irlan_get_provider_info(struct irlan_cb *self)
646 struct sk_buff *skb; 636 struct sk_buff *skb;
647 __u8 *frame; 637 __u8 *frame;
648 638
649 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 639 IRDA_DEBUG(4, "%s()\n", __func__ );
650 640
651 IRDA_ASSERT(self != NULL, return;); 641 IRDA_ASSERT(self != NULL, return;);
652 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 642 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -679,7 +669,7 @@ void irlan_open_data_channel(struct irlan_cb *self)
679 struct sk_buff *skb; 669 struct sk_buff *skb;
680 __u8 *frame; 670 __u8 *frame;
681 671
682 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 672 IRDA_DEBUG(4, "%s()\n", __func__ );
683 673
684 IRDA_ASSERT(self != NULL, return;); 674 IRDA_ASSERT(self != NULL, return;);
685 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 675 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -714,7 +704,7 @@ void irlan_close_data_channel(struct irlan_cb *self)
714 struct sk_buff *skb; 704 struct sk_buff *skb;
715 __u8 *frame; 705 __u8 *frame;
716 706
717 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 707 IRDA_DEBUG(4, "%s()\n", __func__ );
718 708
719 IRDA_ASSERT(self != NULL, return;); 709 IRDA_ASSERT(self != NULL, return;);
720 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 710 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -755,7 +745,7 @@ static void irlan_open_unicast_addr(struct irlan_cb *self)
755 struct sk_buff *skb; 745 struct sk_buff *skb;
756 __u8 *frame; 746 __u8 *frame;
757 747
758 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 748 IRDA_DEBUG(4, "%s()\n", __func__ );
759 749
760 IRDA_ASSERT(self != NULL, return;); 750 IRDA_ASSERT(self != NULL, return;);
761 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 751 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -797,7 +787,7 @@ void irlan_set_broadcast_filter(struct irlan_cb *self, int status)
797 struct sk_buff *skb; 787 struct sk_buff *skb;
798 __u8 *frame; 788 __u8 *frame;
799 789
800 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 790 IRDA_DEBUG(2, "%s()\n", __func__ );
801 791
802 IRDA_ASSERT(self != NULL, return;); 792 IRDA_ASSERT(self != NULL, return;);
803 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 793 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -841,7 +831,7 @@ void irlan_set_multicast_filter(struct irlan_cb *self, int status)
841 struct sk_buff *skb; 831 struct sk_buff *skb;
842 __u8 *frame; 832 __u8 *frame;
843 833
844 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 834 IRDA_DEBUG(2, "%s()\n", __func__ );
845 835
846 IRDA_ASSERT(self != NULL, return;); 836 IRDA_ASSERT(self != NULL, return;);
847 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 837 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -886,7 +876,7 @@ static void irlan_get_unicast_addr(struct irlan_cb *self)
886 struct sk_buff *skb; 876 struct sk_buff *skb;
887 __u8 *frame; 877 __u8 *frame;
888 878
889 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 879 IRDA_DEBUG(2, "%s()\n", __func__ );
890 880
891 IRDA_ASSERT(self != NULL, return;); 881 IRDA_ASSERT(self != NULL, return;);
892 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 882 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -926,7 +916,7 @@ void irlan_get_media_char(struct irlan_cb *self)
926 struct sk_buff *skb; 916 struct sk_buff *skb;
927 __u8 *frame; 917 __u8 *frame;
928 918
929 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 919 IRDA_DEBUG(4, "%s()\n", __func__ );
930 920
931 IRDA_ASSERT(self != NULL, return;); 921 IRDA_ASSERT(self != NULL, return;);
932 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 922 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -1014,7 +1004,7 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
1014 int n=0; 1004 int n=0;
1015 1005
1016 if (skb == NULL) { 1006 if (skb == NULL) {
1017 IRDA_DEBUG(2, "%s(), Got NULL skb\n", __FUNCTION__ ); 1007 IRDA_DEBUG(2, "%s(), Got NULL skb\n", __func__ );
1018 return 0; 1008 return 0;
1019 } 1009 }
1020 1010
@@ -1031,7 +1021,7 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
1031 IRDA_ASSERT(value_len > 0, return 0;); 1021 IRDA_ASSERT(value_len > 0, return 0;);
1032 break; 1022 break;
1033 default: 1023 default:
1034 IRDA_DEBUG(2, "%s(), Unknown parameter type!\n", __FUNCTION__ ); 1024 IRDA_DEBUG(2, "%s(), Unknown parameter type!\n", __func__ );
1035 return 0; 1025 return 0;
1036 break; 1026 break;
1037 } 1027 }
@@ -1041,7 +1031,7 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
1041 1031
1042 /* Make space for data */ 1032 /* Make space for data */
1043 if (skb_tailroom(skb) < (param_len+value_len+3)) { 1033 if (skb_tailroom(skb) < (param_len+value_len+3)) {
1044 IRDA_DEBUG(2, "%s(), No more space at end of skb\n", __FUNCTION__ ); 1034 IRDA_DEBUG(2, "%s(), No more space at end of skb\n", __func__ );
1045 return 0; 1035 return 0;
1046 } 1036 }
1047 skb_put(skb, param_len+value_len+3); 1037 skb_put(skb, param_len+value_len+3);
@@ -1088,13 +1078,13 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
1088 __u16 val_len; 1078 __u16 val_len;
1089 int n=0; 1079 int n=0;
1090 1080
1091 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 1081 IRDA_DEBUG(4, "%s()\n", __func__ );
1092 1082
1093 /* get length of parameter name (1 byte) */ 1083 /* get length of parameter name (1 byte) */
1094 name_len = buf[n++]; 1084 name_len = buf[n++];
1095 1085
1096 if (name_len > 254) { 1086 if (name_len > 254) {
1097 IRDA_DEBUG(2, "%s(), name_len > 254\n", __FUNCTION__ ); 1087 IRDA_DEBUG(2, "%s(), name_len > 254\n", __func__ );
1098 return -RSP_INVALID_COMMAND_FORMAT; 1088 return -RSP_INVALID_COMMAND_FORMAT;
1099 } 1089 }
1100 1090
@@ -1111,7 +1101,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
1111 le16_to_cpus(&val_len); n+=2; 1101 le16_to_cpus(&val_len); n+=2;
1112 1102
1113 if (val_len > 1016) { 1103 if (val_len > 1016) {
1114 IRDA_DEBUG(2, "%s(), parameter length to long\n", __FUNCTION__ ); 1104 IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ );
1115 return -RSP_INVALID_COMMAND_FORMAT; 1105 return -RSP_INVALID_COMMAND_FORMAT;
1116 } 1106 }
1117 *len = val_len; 1107 *len = val_len;
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 1ab91f787cc1..05112be99569 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -103,7 +103,7 @@ static int irlan_eth_open(struct net_device *dev)
103{ 103{
104 struct irlan_cb *self = netdev_priv(dev); 104 struct irlan_cb *self = netdev_priv(dev);
105 105
106 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 106 IRDA_DEBUG(2, "%s()\n", __func__ );
107 107
108 /* Ready to play! */ 108 /* Ready to play! */
109 netif_stop_queue(dev); /* Wait until data link is ready */ 109 netif_stop_queue(dev); /* Wait until data link is ready */
@@ -130,7 +130,7 @@ static int irlan_eth_close(struct net_device *dev)
130{ 130{
131 struct irlan_cb *self = netdev_priv(dev); 131 struct irlan_cb *self = netdev_priv(dev);
132 132
133 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 133 IRDA_DEBUG(2, "%s()\n", __func__ );
134 134
135 /* Stop device */ 135 /* Stop device */
136 netif_stop_queue(dev); 136 netif_stop_queue(dev);
@@ -221,7 +221,7 @@ int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb)
221 } 221 }
222 if (skb->len < ETH_HLEN) { 222 if (skb->len < ETH_HLEN) {
223 IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n", 223 IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n",
224 __FUNCTION__, skb->len); 224 __func__, skb->len);
225 ++self->stats.rx_dropped; 225 ++self->stats.rx_dropped;
226 dev_kfree_skb(skb); 226 dev_kfree_skb(skb);
227 return 0; 227 return 0;
@@ -270,7 +270,7 @@ void irlan_eth_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
270 270
271 IRDA_ASSERT(dev != NULL, return;); 271 IRDA_ASSERT(dev != NULL, return;);
272 272
273 IRDA_DEBUG(0, "%s() : flow %s ; running %d\n", __FUNCTION__, 273 IRDA_DEBUG(0, "%s() : flow %s ; running %d\n", __func__,
274 flow == FLOW_STOP ? "FLOW_STOP" : "FLOW_START", 274 flow == FLOW_STOP ? "FLOW_STOP" : "FLOW_START",
275 netif_running(dev)); 275 netif_running(dev));
276 276
@@ -289,39 +289,6 @@ void irlan_eth_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
289} 289}
290 290
291/* 291/*
292 * Function irlan_etc_send_gratuitous_arp (dev)
293 *
294 * Send gratuitous ARP to announce that we have changed
295 * hardware address, so that all peers updates their ARP tables
296 */
297void irlan_eth_send_gratuitous_arp(struct net_device *dev)
298{
299#ifdef CONFIG_INET
300 struct in_device *in_dev;
301
302 /*
303 * When we get a new MAC address do a gratuitous ARP. This
304 * is useful if we have changed access points on the same
305 * subnet.
306 */
307 IRDA_DEBUG(4, "IrLAN: Sending gratuitous ARP\n");
308 rcu_read_lock();
309 in_dev = __in_dev_get_rcu(dev);
310 if (in_dev == NULL)
311 goto out;
312 if (in_dev->ifa_list)
313
314 arp_send(ARPOP_REQUEST, ETH_P_ARP,
315 in_dev->ifa_list->ifa_address,
316 dev,
317 in_dev->ifa_list->ifa_address,
318 NULL, dev->dev_addr, NULL);
319out:
320 rcu_read_unlock();
321#endif /* CONFIG_INET */
322}
323
324/*
325 * Function set_multicast_list (dev) 292 * Function set_multicast_list (dev)
326 * 293 *
327 * Configure the filtering of the device 294 * Configure the filtering of the device
@@ -332,11 +299,11 @@ static void irlan_eth_set_multicast_list(struct net_device *dev)
332{ 299{
333 struct irlan_cb *self = netdev_priv(dev); 300 struct irlan_cb *self = netdev_priv(dev);
334 301
335 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 302 IRDA_DEBUG(2, "%s()\n", __func__ );
336 303
337 /* Check if data channel has been connected yet */ 304 /* Check if data channel has been connected yet */
338 if (self->client.state != IRLAN_DATA) { 305 if (self->client.state != IRLAN_DATA) {
339 IRDA_DEBUG(1, "%s(), delaying!\n", __FUNCTION__ ); 306 IRDA_DEBUG(1, "%s(), delaying!\n", __func__ );
340 return; 307 return;
341 } 308 }
342 309
@@ -346,20 +313,20 @@ static void irlan_eth_set_multicast_list(struct net_device *dev)
346 } 313 }
347 else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) { 314 else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) {
348 /* Disable promiscuous mode, use normal mode. */ 315 /* Disable promiscuous mode, use normal mode. */
349 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __FUNCTION__ ); 316 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
350 /* hardware_set_filter(NULL); */ 317 /* hardware_set_filter(NULL); */
351 318
352 irlan_set_multicast_filter(self, TRUE); 319 irlan_set_multicast_filter(self, TRUE);
353 } 320 }
354 else if (dev->mc_count) { 321 else if (dev->mc_count) {
355 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __FUNCTION__ ); 322 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
356 /* Walk the address list, and load the filter */ 323 /* Walk the address list, and load the filter */
357 /* hardware_set_filter(dev->mc_list); */ 324 /* hardware_set_filter(dev->mc_list); */
358 325
359 irlan_set_multicast_filter(self, TRUE); 326 irlan_set_multicast_filter(self, TRUE);
360 } 327 }
361 else { 328 else {
362 IRDA_DEBUG(4, "%s(), Clearing multicast filter\n", __FUNCTION__ ); 329 IRDA_DEBUG(4, "%s(), Clearing multicast filter\n", __func__ );
363 irlan_set_multicast_filter(self, FALSE); 330 irlan_set_multicast_filter(self, FALSE);
364 } 331 }
365 332
diff --git a/net/irda/irlan/irlan_event.c b/net/irda/irlan/irlan_event.c
index a9750a801388..cbcb4eb54037 100644
--- a/net/irda/irlan/irlan_event.c
+++ b/net/irda/irlan/irlan_event.c
@@ -40,7 +40,7 @@ char *irlan_state[] = {
40 40
41void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state) 41void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state)
42{ 42{
43 IRDA_DEBUG(2, "%s(), %s\n", __FUNCTION__ , irlan_state[state]); 43 IRDA_DEBUG(2, "%s(), %s\n", __func__ , irlan_state[state]);
44 44
45 IRDA_ASSERT(self != NULL, return;); 45 IRDA_ASSERT(self != NULL, return;);
46 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 46 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -50,7 +50,7 @@ void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state)
50 50
51void irlan_next_provider_state(struct irlan_cb *self, IRLAN_STATE state) 51void irlan_next_provider_state(struct irlan_cb *self, IRLAN_STATE state)
52{ 52{
53 IRDA_DEBUG(2, "%s(), %s\n", __FUNCTION__ , irlan_state[state]); 53 IRDA_DEBUG(2, "%s(), %s\n", __func__ , irlan_state[state]);
54 54
55 IRDA_ASSERT(self != NULL, return;); 55 IRDA_ASSERT(self != NULL, return;);
56 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 56 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
diff --git a/net/irda/irlan/irlan_filter.c b/net/irda/irlan/irlan_filter.c
index 4384be9a6888..9ff7823abec7 100644
--- a/net/irda/irlan/irlan_filter.c
+++ b/net/irda/irlan/irlan_filter.c
@@ -145,7 +145,7 @@ void irlan_check_command_param(struct irlan_cb *self, char *param, char *value)
145{ 145{
146 __u8 *bytes; 146 __u8 *bytes;
147 147
148 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 148 IRDA_DEBUG(4, "%s()\n", __func__ );
149 149
150 bytes = value; 150 bytes = value;
151 151
@@ -158,7 +158,7 @@ void irlan_check_command_param(struct irlan_cb *self, char *param, char *value)
158 * This is experimental!! DB. 158 * This is experimental!! DB.
159 */ 159 */
160 if (strcmp(param, "MODE") == 0) { 160 if (strcmp(param, "MODE") == 0) {
161 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 161 IRDA_DEBUG(0, "%s()\n", __func__ );
162 self->use_udata = TRUE; 162 self->use_udata = TRUE;
163 return; 163 return;
164 } 164 }
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
index 13db942812e4..3f81f81b2dfa 100644
--- a/net/irda/irlan/irlan_provider.c
+++ b/net/irda/irlan/irlan_provider.c
@@ -70,7 +70,7 @@ static int irlan_provider_data_indication(void *instance, void *sap,
70 struct irlan_cb *self; 70 struct irlan_cb *self;
71 __u8 code; 71 __u8 code;
72 72
73 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 73 IRDA_DEBUG(4, "%s()\n", __func__ );
74 74
75 self = (struct irlan_cb *) instance; 75 self = (struct irlan_cb *) instance;
76 76
@@ -99,15 +99,15 @@ static int irlan_provider_data_indication(void *instance, void *sap,
99 irlan_do_provider_event(self, IRLAN_FILTER_CONFIG_CMD, skb); 99 irlan_do_provider_event(self, IRLAN_FILTER_CONFIG_CMD, skb);
100 break; 100 break;
101 case CMD_RECONNECT_DATA_CHAN: 101 case CMD_RECONNECT_DATA_CHAN:
102 IRDA_DEBUG(2, "%s(), Got RECONNECT_DATA_CHAN command\n", __FUNCTION__ ); 102 IRDA_DEBUG(2, "%s(), Got RECONNECT_DATA_CHAN command\n", __func__ );
103 IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __FUNCTION__ ); 103 IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __func__ );
104 break; 104 break;
105 case CMD_CLOSE_DATA_CHAN: 105 case CMD_CLOSE_DATA_CHAN:
106 IRDA_DEBUG(2, "Got CLOSE_DATA_CHAN command!\n"); 106 IRDA_DEBUG(2, "Got CLOSE_DATA_CHAN command!\n");
107 IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __FUNCTION__ ); 107 IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __func__ );
108 break; 108 break;
109 default: 109 default:
110 IRDA_DEBUG(2, "%s(), Unknown command!\n", __FUNCTION__ ); 110 IRDA_DEBUG(2, "%s(), Unknown command!\n", __func__ );
111 break; 111 break;
112 } 112 }
113 return 0; 113 return 0;
@@ -129,7 +129,7 @@ static void irlan_provider_connect_indication(void *instance, void *sap,
129 struct tsap_cb *tsap; 129 struct tsap_cb *tsap;
130 __u32 saddr, daddr; 130 __u32 saddr, daddr;
131 131
132 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 132 IRDA_DEBUG(0, "%s()\n", __func__ );
133 133
134 self = (struct irlan_cb *) instance; 134 self = (struct irlan_cb *) instance;
135 tsap = (struct tsap_cb *) sap; 135 tsap = (struct tsap_cb *) sap;
@@ -182,7 +182,7 @@ static void irlan_provider_disconnect_indication(void *instance, void *sap,
182 struct irlan_cb *self; 182 struct irlan_cb *self;
183 struct tsap_cb *tsap; 183 struct tsap_cb *tsap;
184 184
185 IRDA_DEBUG(4, "%s(), reason=%d\n", __FUNCTION__ , reason); 185 IRDA_DEBUG(4, "%s(), reason=%d\n", __func__ , reason);
186 186
187 self = (struct irlan_cb *) instance; 187 self = (struct irlan_cb *) instance;
188 tsap = (struct tsap_cb *) sap; 188 tsap = (struct tsap_cb *) sap;
@@ -236,7 +236,7 @@ int irlan_provider_parse_command(struct irlan_cb *self, int cmd,
236 236
237 IRDA_ASSERT(skb != NULL, return -RSP_PROTOCOL_ERROR;); 237 IRDA_ASSERT(skb != NULL, return -RSP_PROTOCOL_ERROR;);
238 238
239 IRDA_DEBUG(4, "%s(), skb->len=%d\n", __FUNCTION__ , (int)skb->len); 239 IRDA_DEBUG(4, "%s(), skb->len=%d\n", __func__ , (int)skb->len);
240 240
241 IRDA_ASSERT(self != NULL, return -RSP_PROTOCOL_ERROR;); 241 IRDA_ASSERT(self != NULL, return -RSP_PROTOCOL_ERROR;);
242 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -RSP_PROTOCOL_ERROR;); 242 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -RSP_PROTOCOL_ERROR;);
@@ -266,7 +266,7 @@ int irlan_provider_parse_command(struct irlan_cb *self, int cmd,
266 for (i=0; i<count;i++) { 266 for (i=0; i<count;i++) {
267 ret = irlan_extract_param(ptr, name, value, &val_len); 267 ret = irlan_extract_param(ptr, name, value, &val_len);
268 if (ret < 0) { 268 if (ret < 0) {
269 IRDA_DEBUG(2, "%s(), IrLAN, Error!\n", __FUNCTION__ ); 269 IRDA_DEBUG(2, "%s(), IrLAN, Error!\n", __func__ );
270 break; 270 break;
271 } 271 }
272 ptr+=ret; 272 ptr+=ret;
@@ -291,7 +291,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
291{ 291{
292 struct sk_buff *skb; 292 struct sk_buff *skb;
293 293
294 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 294 IRDA_DEBUG(4, "%s()\n", __func__ );
295 295
296 IRDA_ASSERT(self != NULL, return;); 296 IRDA_ASSERT(self != NULL, return;);
297 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 297 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -323,7 +323,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
323 irlan_insert_string_param(skb, "MEDIA", "802.5"); 323 irlan_insert_string_param(skb, "MEDIA", "802.5");
324 break; 324 break;
325 default: 325 default:
326 IRDA_DEBUG(2, "%s(), unknown media type!\n", __FUNCTION__ ); 326 IRDA_DEBUG(2, "%s(), unknown media type!\n", __func__ );
327 break; 327 break;
328 } 328 }
329 irlan_insert_short_param(skb, "IRLAN_VER", 0x0101); 329 irlan_insert_short_param(skb, "IRLAN_VER", 0x0101);
@@ -347,7 +347,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
347 irlan_insert_string_param(skb, "ACCESS_TYPE", "HOSTED"); 347 irlan_insert_string_param(skb, "ACCESS_TYPE", "HOSTED");
348 break; 348 break;
349 default: 349 default:
350 IRDA_DEBUG(2, "%s(), Unknown access type\n", __FUNCTION__ ); 350 IRDA_DEBUG(2, "%s(), Unknown access type\n", __func__ );
351 break; 351 break;
352 } 352 }
353 irlan_insert_short_param(skb, "MAX_FRAME", 0x05ee); 353 irlan_insert_short_param(skb, "MAX_FRAME", 0x05ee);
@@ -367,7 +367,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
367 irlan_filter_request(self, skb); 367 irlan_filter_request(self, skb);
368 break; 368 break;
369 default: 369 default:
370 IRDA_DEBUG(2, "%s(), Unknown command!\n", __FUNCTION__ ); 370 IRDA_DEBUG(2, "%s(), Unknown command!\n", __func__ );
371 break; 371 break;
372 } 372 }
373 373
@@ -385,7 +385,7 @@ int irlan_provider_open_ctrl_tsap(struct irlan_cb *self)
385 struct tsap_cb *tsap; 385 struct tsap_cb *tsap;
386 notify_t notify; 386 notify_t notify;
387 387
388 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 388 IRDA_DEBUG(4, "%s()\n", __func__ );
389 389
390 IRDA_ASSERT(self != NULL, return -1;); 390 IRDA_ASSERT(self != NULL, return -1;);
391 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); 391 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
@@ -406,7 +406,7 @@ int irlan_provider_open_ctrl_tsap(struct irlan_cb *self)
406 406
407 tsap = irttp_open_tsap(LSAP_ANY, 1, &notify); 407 tsap = irttp_open_tsap(LSAP_ANY, 1, &notify);
408 if (!tsap) { 408 if (!tsap) {
409 IRDA_DEBUG(2, "%s(), Got no tsap!\n", __FUNCTION__ ); 409 IRDA_DEBUG(2, "%s(), Got no tsap!\n", __func__ );
410 return -1; 410 return -1;
411 } 411 }
412 self->provider.tsap_ctrl = tsap; 412 self->provider.tsap_ctrl = tsap;
diff --git a/net/irda/irlan/irlan_provider_event.c b/net/irda/irlan/irlan_provider_event.c
index 10ece5a47522..01a9d7c993ee 100644
--- a/net/irda/irlan/irlan_provider_event.c
+++ b/net/irda/irlan/irlan_provider_event.c
@@ -72,7 +72,7 @@ void irlan_do_provider_event(struct irlan_cb *self, IRLAN_EVENT event,
72static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event, 72static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
73 struct sk_buff *skb) 73 struct sk_buff *skb)
74{ 74{
75 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 75 IRDA_DEBUG(4, "%s()\n", __func__ );
76 76
77 IRDA_ASSERT(self != NULL, return -1;); 77 IRDA_ASSERT(self != NULL, return -1;);
78 78
@@ -82,7 +82,7 @@ static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
82 irlan_next_provider_state( self, IRLAN_INFO); 82 irlan_next_provider_state( self, IRLAN_INFO);
83 break; 83 break;
84 default: 84 default:
85 IRDA_DEBUG(4, "%s(), Unknown event %d\n", __FUNCTION__ , event); 85 IRDA_DEBUG(4, "%s(), Unknown event %d\n", __func__ , event);
86 break; 86 break;
87 } 87 }
88 if (skb) 88 if (skb)
@@ -101,7 +101,7 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
101{ 101{
102 int ret; 102 int ret;
103 103
104 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 104 IRDA_DEBUG(4, "%s()\n", __func__ );
105 105
106 IRDA_ASSERT(self != NULL, return -1;); 106 IRDA_ASSERT(self != NULL, return -1;);
107 107
@@ -147,7 +147,7 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
147 irlan_next_provider_state(self, IRLAN_IDLE); 147 irlan_next_provider_state(self, IRLAN_IDLE);
148 break; 148 break;
149 default: 149 default:
150 IRDA_DEBUG( 0, "%s(), Unknown event %d\n", __FUNCTION__ , event); 150 IRDA_DEBUG( 0, "%s(), Unknown event %d\n", __func__ , event);
151 break; 151 break;
152 } 152 }
153 if (skb) 153 if (skb)
@@ -166,7 +166,7 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
166static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event, 166static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event,
167 struct sk_buff *skb) 167 struct sk_buff *skb)
168{ 168{
169 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 169 IRDA_DEBUG(4, "%s()\n", __func__ );
170 170
171 IRDA_ASSERT(self != NULL, return -1;); 171 IRDA_ASSERT(self != NULL, return -1;);
172 172
@@ -186,7 +186,7 @@ static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event,
186 irlan_next_provider_state(self, IRLAN_IDLE); 186 irlan_next_provider_state(self, IRLAN_IDLE);
187 break; 187 break;
188 default: 188 default:
189 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event); 189 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event);
190 break; 190 break;
191 } 191 }
192 if (skb) 192 if (skb)
@@ -205,7 +205,7 @@ static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event,
205static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event, 205static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event,
206 struct sk_buff *skb) 206 struct sk_buff *skb)
207{ 207{
208 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 208 IRDA_DEBUG(4, "%s()\n", __func__ );
209 209
210 IRDA_ASSERT(self != NULL, return -1;); 210 IRDA_ASSERT(self != NULL, return -1;);
211 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); 211 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
@@ -221,7 +221,7 @@ static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event,
221 irlan_next_provider_state(self, IRLAN_IDLE); 221 irlan_next_provider_state(self, IRLAN_IDLE);
222 break; 222 break;
223 default: 223 default:
224 IRDA_DEBUG( 0, "%s(), Unknown event %d\n", __FUNCTION__ , event); 224 IRDA_DEBUG( 0, "%s(), Unknown event %d\n", __func__ , event);
225 break; 225 break;
226 } 226 }
227 if (skb) 227 if (skb)
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index f3236acc8d22..e4965b764b9b 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -88,7 +88,7 @@ int __init irlap_init(void)
88 irlap = hashbin_new(HB_LOCK); 88 irlap = hashbin_new(HB_LOCK);
89 if (irlap == NULL) { 89 if (irlap == NULL) {
90 IRDA_ERROR("%s: can't allocate irlap hashbin!\n", 90 IRDA_ERROR("%s: can't allocate irlap hashbin!\n",
91 __FUNCTION__); 91 __func__);
92 return -ENOMEM; 92 return -ENOMEM;
93 } 93 }
94 94
@@ -113,7 +113,7 @@ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
113{ 113{
114 struct irlap_cb *self; 114 struct irlap_cb *self;
115 115
116 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 116 IRDA_DEBUG(4, "%s()\n", __func__);
117 117
118 /* Initialize the irlap structure. */ 118 /* Initialize the irlap structure. */
119 self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL); 119 self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL);
@@ -215,7 +215,7 @@ void irlap_close(struct irlap_cb *self)
215{ 215{
216 struct irlap_cb *lap; 216 struct irlap_cb *lap;
217 217
218 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 218 IRDA_DEBUG(4, "%s()\n", __func__);
219 219
220 IRDA_ASSERT(self != NULL, return;); 220 IRDA_ASSERT(self != NULL, return;);
221 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 221 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -231,7 +231,7 @@ void irlap_close(struct irlap_cb *self)
231 /* Be sure that we manage to remove ourself from the hash */ 231 /* Be sure that we manage to remove ourself from the hash */
232 lap = hashbin_remove(irlap, self->saddr, NULL); 232 lap = hashbin_remove(irlap, self->saddr, NULL);
233 if (!lap) { 233 if (!lap) {
234 IRDA_DEBUG(1, "%s(), Didn't find myself!\n", __FUNCTION__); 234 IRDA_DEBUG(1, "%s(), Didn't find myself!\n", __func__);
235 return; 235 return;
236 } 236 }
237 __irlap_close(lap); 237 __irlap_close(lap);
@@ -246,7 +246,7 @@ EXPORT_SYMBOL(irlap_close);
246 */ 246 */
247void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb) 247void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb)
248{ 248{
249 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 249 IRDA_DEBUG(4, "%s()\n", __func__);
250 250
251 IRDA_ASSERT(self != NULL, return;); 251 IRDA_ASSERT(self != NULL, return;);
252 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 252 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -265,7 +265,7 @@ void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb)
265 */ 265 */
266void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata) 266void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata)
267{ 267{
268 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 268 IRDA_DEBUG(4, "%s()\n", __func__);
269 269
270 irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL); 270 irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL);
271} 271}
@@ -280,7 +280,7 @@ void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata)
280void irlap_connect_request(struct irlap_cb *self, __u32 daddr, 280void irlap_connect_request(struct irlap_cb *self, __u32 daddr,
281 struct qos_info *qos_user, int sniff) 281 struct qos_info *qos_user, int sniff)
282{ 282{
283 IRDA_DEBUG(3, "%s(), daddr=0x%08x\n", __FUNCTION__, daddr); 283 IRDA_DEBUG(3, "%s(), daddr=0x%08x\n", __func__, daddr);
284 284
285 IRDA_ASSERT(self != NULL, return;); 285 IRDA_ASSERT(self != NULL, return;);
286 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 286 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -307,7 +307,7 @@ void irlap_connect_request(struct irlap_cb *self, __u32 daddr,
307 */ 307 */
308void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb) 308void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb)
309{ 309{
310 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 310 IRDA_DEBUG(4, "%s()\n", __func__);
311 311
312 IRDA_ASSERT(self != NULL, return;); 312 IRDA_ASSERT(self != NULL, return;);
313 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 313 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -344,7 +344,7 @@ void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
344 IRDA_ASSERT(self != NULL, return;); 344 IRDA_ASSERT(self != NULL, return;);
345 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 345 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
346 346
347 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 347 IRDA_DEBUG(3, "%s()\n", __func__);
348 348
349 IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER), 349 IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
350 return;); 350 return;);
@@ -391,7 +391,7 @@ void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb)
391 IRDA_ASSERT(self != NULL, return;); 391 IRDA_ASSERT(self != NULL, return;);
392 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 392 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
393 393
394 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 394 IRDA_DEBUG(3, "%s()\n", __func__);
395 395
396 IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER), 396 IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
397 return;); 397 return;);
@@ -417,7 +417,7 @@ void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb)
417#ifdef CONFIG_IRDA_ULTRA 417#ifdef CONFIG_IRDA_ULTRA
418void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb) 418void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb)
419{ 419{
420 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 420 IRDA_DEBUG(1, "%s()\n", __func__);
421 421
422 IRDA_ASSERT(self != NULL, return;); 422 IRDA_ASSERT(self != NULL, return;);
423 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 423 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -437,7 +437,7 @@ void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb)
437 */ 437 */
438void irlap_disconnect_request(struct irlap_cb *self) 438void irlap_disconnect_request(struct irlap_cb *self)
439{ 439{
440 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 440 IRDA_DEBUG(3, "%s()\n", __func__);
441 441
442 IRDA_ASSERT(self != NULL, return;); 442 IRDA_ASSERT(self != NULL, return;);
443 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 443 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -458,7 +458,7 @@ void irlap_disconnect_request(struct irlap_cb *self)
458 irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL); 458 irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL);
459 break; 459 break;
460 default: 460 default:
461 IRDA_DEBUG(2, "%s(), disconnect pending!\n", __FUNCTION__); 461 IRDA_DEBUG(2, "%s(), disconnect pending!\n", __func__);
462 self->disconnect_pending = TRUE; 462 self->disconnect_pending = TRUE;
463 break; 463 break;
464 } 464 }
@@ -472,7 +472,7 @@ void irlap_disconnect_request(struct irlap_cb *self)
472 */ 472 */
473void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason) 473void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
474{ 474{
475 IRDA_DEBUG(1, "%s(), reason=%s\n", __FUNCTION__, lap_reasons[reason]); 475 IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, lap_reasons[reason]);
476 476
477 IRDA_ASSERT(self != NULL, return;); 477 IRDA_ASSERT(self != NULL, return;);
478 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 478 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -482,7 +482,7 @@ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
482 482
483 switch (reason) { 483 switch (reason) {
484 case LAP_RESET_INDICATION: 484 case LAP_RESET_INDICATION:
485 IRDA_DEBUG(1, "%s(), Sending reset request!\n", __FUNCTION__); 485 IRDA_DEBUG(1, "%s(), Sending reset request!\n", __func__);
486 irlap_do_event(self, RESET_REQUEST, NULL, NULL); 486 irlap_do_event(self, RESET_REQUEST, NULL, NULL);
487 break; 487 break;
488 case LAP_NO_RESPONSE: /* FALLTROUGH */ 488 case LAP_NO_RESPONSE: /* FALLTROUGH */
@@ -493,7 +493,7 @@ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
493 reason, NULL); 493 reason, NULL);
494 break; 494 break;
495 default: 495 default:
496 IRDA_ERROR("%s: Unknown reason %d\n", __FUNCTION__, reason); 496 IRDA_ERROR("%s: Unknown reason %d\n", __func__, reason);
497 } 497 }
498} 498}
499 499
@@ -511,7 +511,7 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
511 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 511 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
512 IRDA_ASSERT(discovery != NULL, return;); 512 IRDA_ASSERT(discovery != NULL, return;);
513 513
514 IRDA_DEBUG(4, "%s(), nslots = %d\n", __FUNCTION__, discovery->nslots); 514 IRDA_DEBUG(4, "%s(), nslots = %d\n", __func__, discovery->nslots);
515 515
516 IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) || 516 IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) ||
517 (discovery->nslots == 8) || (discovery->nslots == 16), 517 (discovery->nslots == 8) || (discovery->nslots == 16),
@@ -520,7 +520,7 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
520 /* Discovery is only possible in NDM mode */ 520 /* Discovery is only possible in NDM mode */
521 if (self->state != LAP_NDM) { 521 if (self->state != LAP_NDM) {
522 IRDA_DEBUG(4, "%s(), discovery only possible in NDM mode\n", 522 IRDA_DEBUG(4, "%s(), discovery only possible in NDM mode\n",
523 __FUNCTION__); 523 __func__);
524 irlap_discovery_confirm(self, NULL); 524 irlap_discovery_confirm(self, NULL);
525 /* Note : in theory, if we are not in NDM, we could postpone 525 /* Note : in theory, if we are not in NDM, we could postpone
526 * the discovery like we do for connection request. 526 * the discovery like we do for connection request.
@@ -543,7 +543,7 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
543 543
544 if (self->discovery_log == NULL) { 544 if (self->discovery_log == NULL) {
545 IRDA_WARNING("%s(), Unable to allocate discovery log!\n", 545 IRDA_WARNING("%s(), Unable to allocate discovery log!\n",
546 __FUNCTION__); 546 __func__);
547 return; 547 return;
548 } 548 }
549 549
@@ -598,7 +598,7 @@ void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log)
598 */ 598 */
599void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery) 599void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery)
600{ 600{
601 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 601 IRDA_DEBUG(4, "%s()\n", __func__);
602 602
603 IRDA_ASSERT(self != NULL, return;); 603 IRDA_ASSERT(self != NULL, return;);
604 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 604 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -644,7 +644,7 @@ void irlap_status_indication(struct irlap_cb *self, int quality_of_link)
644 */ 644 */
645void irlap_reset_indication(struct irlap_cb *self) 645void irlap_reset_indication(struct irlap_cb *self)
646{ 646{
647 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 647 IRDA_DEBUG(1, "%s()\n", __func__);
648 648
649 IRDA_ASSERT(self != NULL, return;); 649 IRDA_ASSERT(self != NULL, return;);
650 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 650 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -660,7 +660,7 @@ void irlap_reset_indication(struct irlap_cb *self)
660 */ 660 */
661void irlap_reset_confirm(void) 661void irlap_reset_confirm(void)
662{ 662{
663 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 663 IRDA_DEBUG(1, "%s()\n", __func__);
664} 664}
665 665
666/* 666/*
@@ -760,7 +760,7 @@ int irlap_validate_nr_received(struct irlap_cb *self, int nr)
760{ 760{
761 /* nr as expected? */ 761 /* nr as expected? */
762 if (nr == self->vs) { 762 if (nr == self->vs) {
763 IRDA_DEBUG(4, "%s(), expected!\n", __FUNCTION__); 763 IRDA_DEBUG(4, "%s(), expected!\n", __func__);
764 return NR_EXPECTED; 764 return NR_EXPECTED;
765 } 765 }
766 766
@@ -788,7 +788,7 @@ int irlap_validate_nr_received(struct irlap_cb *self, int nr)
788 */ 788 */
789void irlap_initiate_connection_state(struct irlap_cb *self) 789void irlap_initiate_connection_state(struct irlap_cb *self)
790{ 790{
791 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 791 IRDA_DEBUG(4, "%s()\n", __func__);
792 792
793 IRDA_ASSERT(self != NULL, return;); 793 IRDA_ASSERT(self != NULL, return;);
794 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 794 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -871,7 +871,7 @@ static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
871{ 871{
872 struct sk_buff *skb; 872 struct sk_buff *skb;
873 873
874 IRDA_DEBUG(0, "%s(), setting speed to %d\n", __FUNCTION__, speed); 874 IRDA_DEBUG(0, "%s(), setting speed to %d\n", __func__, speed);
875 875
876 IRDA_ASSERT(self != NULL, return;); 876 IRDA_ASSERT(self != NULL, return;);
877 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 877 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -914,7 +914,7 @@ static void irlap_init_qos_capabilities(struct irlap_cb *self,
914 * user may not have set all of them. 914 * user may not have set all of them.
915 */ 915 */
916 if (qos_user) { 916 if (qos_user) {
917 IRDA_DEBUG(1, "%s(), Found user specified QoS!\n", __FUNCTION__); 917 IRDA_DEBUG(1, "%s(), Found user specified QoS!\n", __func__);
918 918
919 if (qos_user->baud_rate.bits) 919 if (qos_user->baud_rate.bits)
920 self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits; 920 self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits;
@@ -944,7 +944,7 @@ static void irlap_init_qos_capabilities(struct irlap_cb *self,
944 */ 944 */
945void irlap_apply_default_connection_parameters(struct irlap_cb *self) 945void irlap_apply_default_connection_parameters(struct irlap_cb *self)
946{ 946{
947 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 947 IRDA_DEBUG(4, "%s()\n", __func__);
948 948
949 IRDA_ASSERT(self != NULL, return;); 949 IRDA_ASSERT(self != NULL, return;);
950 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 950 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -1007,7 +1007,7 @@ void irlap_apply_default_connection_parameters(struct irlap_cb *self)
1007 */ 1007 */
1008void irlap_apply_connection_parameters(struct irlap_cb *self, int now) 1008void irlap_apply_connection_parameters(struct irlap_cb *self, int now)
1009{ 1009{
1010 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 1010 IRDA_DEBUG(4, "%s()\n", __func__);
1011 1011
1012 IRDA_ASSERT(self != NULL, return;); 1012 IRDA_ASSERT(self != NULL, return;);
1013 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 1013 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index 6af86eba7463..16c4ef0f5c1a 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -217,7 +217,7 @@ static void irlap_start_poll_timer(struct irlap_cb *self, int timeout)
217 } else 217 } else
218 self->fast_RR = FALSE; 218 self->fast_RR = FALSE;
219 219
220 IRDA_DEBUG(3, "%s(), timeout=%d (%ld)\n", __FUNCTION__, timeout, jiffies); 220 IRDA_DEBUG(3, "%s(), timeout=%d (%ld)\n", __func__, timeout, jiffies);
221#endif /* CONFIG_IRDA_FAST_RR */ 221#endif /* CONFIG_IRDA_FAST_RR */
222 222
223 if (timeout == 0) 223 if (timeout == 0)
@@ -241,7 +241,7 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
241 if (!self || self->magic != LAP_MAGIC) 241 if (!self || self->magic != LAP_MAGIC)
242 return; 242 return;
243 243
244 IRDA_DEBUG(3, "%s(), event = %s, state = %s\n", __FUNCTION__, 244 IRDA_DEBUG(3, "%s(), event = %s, state = %s\n", __func__,
245 irlap_event[event], irlap_state[self->state]); 245 irlap_event[event], irlap_state[self->state]);
246 246
247 ret = (*state[self->state])(self, event, skb, info); 247 ret = (*state[self->state])(self, event, skb, info);
@@ -259,7 +259,7 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
259 * try to disconnect link if we send any data frames, since 259 * try to disconnect link if we send any data frames, since
260 * that will change the state away form XMIT 260 * that will change the state away form XMIT
261 */ 261 */
262 IRDA_DEBUG(2, "%s() : queue len = %d\n", __FUNCTION__, 262 IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__,
263 skb_queue_len(&self->txq)); 263 skb_queue_len(&self->txq));
264 264
265 if (!skb_queue_empty(&self->txq)) { 265 if (!skb_queue_empty(&self->txq)) {
@@ -340,7 +340,7 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
340 * media busy in irlap_connect_request() and 340 * media busy in irlap_connect_request() and
341 * postpone the event... - Jean II */ 341 * postpone the event... - Jean II */
342 IRDA_DEBUG(0, "%s(), CONNECT_REQUEST: media busy!\n", 342 IRDA_DEBUG(0, "%s(), CONNECT_REQUEST: media busy!\n",
343 __FUNCTION__); 343 __func__);
344 344
345 /* Always switch state before calling upper layers */ 345 /* Always switch state before calling upper layers */
346 irlap_next_state(self, LAP_NDM); 346 irlap_next_state(self, LAP_NDM);
@@ -367,7 +367,7 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
367 irlap_connect_indication(self, skb); 367 irlap_connect_indication(self, skb);
368 } else { 368 } else {
369 IRDA_DEBUG(0, "%s(), SNRM frame does not " 369 IRDA_DEBUG(0, "%s(), SNRM frame does not "
370 "contain an I field!\n", __FUNCTION__); 370 "contain an I field!\n", __func__);
371 } 371 }
372 break; 372 break;
373 case DISCOVERY_REQUEST: 373 case DISCOVERY_REQUEST:
@@ -375,7 +375,7 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
375 375
376 if (self->media_busy) { 376 if (self->media_busy) {
377 IRDA_DEBUG(1, "%s(), DISCOVERY_REQUEST: media busy!\n", 377 IRDA_DEBUG(1, "%s(), DISCOVERY_REQUEST: media busy!\n",
378 __FUNCTION__); 378 __func__);
379 /* irlap->log.condition = MEDIA_BUSY; */ 379 /* irlap->log.condition = MEDIA_BUSY; */
380 380
381 /* This will make IrLMP try again */ 381 /* This will make IrLMP try again */
@@ -441,7 +441,7 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
441 * those cases... 441 * those cases...
442 * Jean II 442 * Jean II
443 */ 443 */
444 IRDA_DEBUG(1, "%s(), Receiving final discovery request, missed the discovery slots :-(\n", __FUNCTION__); 444 IRDA_DEBUG(1, "%s(), Receiving final discovery request, missed the discovery slots :-(\n", __func__);
445 445
446 /* Last discovery request -> in the log */ 446 /* Last discovery request -> in the log */
447 irlap_discovery_indication(self, info->discovery); 447 irlap_discovery_indication(self, info->discovery);
@@ -520,7 +520,7 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
520 /* Only accept broadcast frames in NDM mode */ 520 /* Only accept broadcast frames in NDM mode */
521 if (info->caddr != CBROADCAST) { 521 if (info->caddr != CBROADCAST) {
522 IRDA_DEBUG(0, "%s(), not a broadcast frame!\n", 522 IRDA_DEBUG(0, "%s(), not a broadcast frame!\n",
523 __FUNCTION__); 523 __func__);
524 } else 524 } else
525 irlap_unitdata_indication(self, skb); 525 irlap_unitdata_indication(self, skb);
526 break; 526 break;
@@ -536,10 +536,10 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
536 irlap_send_test_frame(self, CBROADCAST, info->daddr, skb); 536 irlap_send_test_frame(self, CBROADCAST, info->daddr, skb);
537 break; 537 break;
538 case RECV_TEST_RSP: 538 case RECV_TEST_RSP:
539 IRDA_DEBUG(0, "%s() not implemented!\n", __FUNCTION__); 539 IRDA_DEBUG(0, "%s() not implemented!\n", __func__);
540 break; 540 break;
541 default: 541 default:
542 IRDA_DEBUG(2, "%s(), Unknown event %s\n", __FUNCTION__, 542 IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__,
543 irlap_event[event]); 543 irlap_event[event]);
544 544
545 ret = -1; 545 ret = -1;
@@ -567,13 +567,13 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event,
567 IRDA_ASSERT(info != NULL, return -1;); 567 IRDA_ASSERT(info != NULL, return -1;);
568 IRDA_ASSERT(info->discovery != NULL, return -1;); 568 IRDA_ASSERT(info->discovery != NULL, return -1;);
569 569
570 IRDA_DEBUG(4, "%s(), daddr=%08x\n", __FUNCTION__, 570 IRDA_DEBUG(4, "%s(), daddr=%08x\n", __func__,
571 info->discovery->data.daddr); 571 info->discovery->data.daddr);
572 572
573 if (!self->discovery_log) { 573 if (!self->discovery_log) {
574 IRDA_WARNING("%s: discovery log is gone! " 574 IRDA_WARNING("%s: discovery log is gone! "
575 "maybe the discovery timeout has been set" 575 "maybe the discovery timeout has been set"
576 " too short?\n", __FUNCTION__); 576 " too short?\n", __func__);
577 break; 577 break;
578 } 578 }
579 hashbin_insert(self->discovery_log, 579 hashbin_insert(self->discovery_log,
@@ -598,7 +598,7 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event,
598 598
599 IRDA_ASSERT(info != NULL, return -1;); 599 IRDA_ASSERT(info != NULL, return -1;);
600 600
601 IRDA_DEBUG(1, "%s(), Receiving discovery request (s = %d) while performing discovery :-(\n", __FUNCTION__, info->s); 601 IRDA_DEBUG(1, "%s(), Receiving discovery request (s = %d) while performing discovery :-(\n", __func__, info->s);
602 602
603 /* Last discovery request ? */ 603 /* Last discovery request ? */
604 if (info->s == 0xff) 604 if (info->s == 0xff)
@@ -613,7 +613,7 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event,
613 */ 613 */
614 if (irda_device_is_receiving(self->netdev) && !self->add_wait) { 614 if (irda_device_is_receiving(self->netdev) && !self->add_wait) {
615 IRDA_DEBUG(2, "%s(), device is slow to answer, " 615 IRDA_DEBUG(2, "%s(), device is slow to answer, "
616 "waiting some more!\n", __FUNCTION__); 616 "waiting some more!\n", __func__);
617 irlap_start_slot_timer(self, msecs_to_jiffies(10)); 617 irlap_start_slot_timer(self, msecs_to_jiffies(10));
618 self->add_wait = TRUE; 618 self->add_wait = TRUE;
619 return ret; 619 return ret;
@@ -649,7 +649,7 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event,
649 } 649 }
650 break; 650 break;
651 default: 651 default:
652 IRDA_DEBUG(2, "%s(), Unknown event %s\n", __FUNCTION__, 652 IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__,
653 irlap_event[event]); 653 irlap_event[event]);
654 654
655 ret = -1; 655 ret = -1;
@@ -671,7 +671,7 @@ static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event,
671 discovery_t *discovery_rsp; 671 discovery_t *discovery_rsp;
672 int ret=0; 672 int ret=0;
673 673
674 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 674 IRDA_DEBUG(4, "%s()\n", __func__);
675 675
676 IRDA_ASSERT(self != NULL, return -1;); 676 IRDA_ASSERT(self != NULL, return -1;);
677 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); 677 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
@@ -679,7 +679,7 @@ static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event,
679 switch (event) { 679 switch (event) {
680 case QUERY_TIMER_EXPIRED: 680 case QUERY_TIMER_EXPIRED:
681 IRDA_DEBUG(0, "%s(), QUERY_TIMER_EXPIRED <%ld>\n", 681 IRDA_DEBUG(0, "%s(), QUERY_TIMER_EXPIRED <%ld>\n",
682 __FUNCTION__, jiffies); 682 __func__, jiffies);
683 irlap_next_state(self, LAP_NDM); 683 irlap_next_state(self, LAP_NDM);
684 break; 684 break;
685 case RECV_DISCOVERY_XID_CMD: 685 case RECV_DISCOVERY_XID_CMD:
@@ -717,7 +717,7 @@ static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event,
717 } 717 }
718 break; 718 break;
719 default: 719 default:
720 IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __FUNCTION__, 720 IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__,
721 event, irlap_event[event]); 721 event, irlap_event[event]);
722 722
723 ret = -1; 723 ret = -1;
@@ -738,7 +738,7 @@ static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event,
738{ 738{
739 int ret = 0; 739 int ret = 0;
740 740
741 IRDA_DEBUG(4, "%s(), event=%s\n", __FUNCTION__, irlap_event[ event]); 741 IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[ event]);
742 742
743 IRDA_ASSERT(self != NULL, return -1;); 743 IRDA_ASSERT(self != NULL, return -1;);
744 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); 744 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
@@ -799,18 +799,18 @@ static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event,
799 break; 799 break;
800 case RECV_DISCOVERY_XID_CMD: 800 case RECV_DISCOVERY_XID_CMD:
801 IRDA_DEBUG(3, "%s(), event RECV_DISCOVER_XID_CMD!\n", 801 IRDA_DEBUG(3, "%s(), event RECV_DISCOVER_XID_CMD!\n",
802 __FUNCTION__); 802 __func__);
803 irlap_next_state(self, LAP_NDM); 803 irlap_next_state(self, LAP_NDM);
804 804
805 break; 805 break;
806 case DISCONNECT_REQUEST: 806 case DISCONNECT_REQUEST:
807 IRDA_DEBUG(0, "%s(), Disconnect request!\n", __FUNCTION__); 807 IRDA_DEBUG(0, "%s(), Disconnect request!\n", __func__);
808 irlap_send_dm_frame(self); 808 irlap_send_dm_frame(self);
809 irlap_next_state( self, LAP_NDM); 809 irlap_next_state( self, LAP_NDM);
810 irlap_disconnect_indication(self, LAP_DISC_INDICATION); 810 irlap_disconnect_indication(self, LAP_DISC_INDICATION);
811 break; 811 break;
812 default: 812 default:
813 IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __FUNCTION__, 813 IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__,
814 event, irlap_event[event]); 814 event, irlap_event[event]);
815 815
816 ret = -1; 816 ret = -1;
@@ -832,7 +832,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
832{ 832{
833 int ret = 0; 833 int ret = 0;
834 834
835 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 835 IRDA_DEBUG(4, "%s()\n", __func__);
836 836
837 IRDA_ASSERT(self != NULL, return -1;); 837 IRDA_ASSERT(self != NULL, return -1;);
838 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); 838 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
@@ -861,7 +861,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
861 self->retry_count++; 861 self->retry_count++;
862 break; 862 break;
863 case RECV_SNRM_CMD: 863 case RECV_SNRM_CMD:
864 IRDA_DEBUG(4, "%s(), SNRM battle!\n", __FUNCTION__); 864 IRDA_DEBUG(4, "%s(), SNRM battle!\n", __func__);
865 865
866 IRDA_ASSERT(skb != NULL, return 0;); 866 IRDA_ASSERT(skb != NULL, return 0;);
867 IRDA_ASSERT(info != NULL, return 0;); 867 IRDA_ASSERT(info != NULL, return 0;);
@@ -948,7 +948,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
948 irlap_disconnect_indication(self, LAP_DISC_INDICATION); 948 irlap_disconnect_indication(self, LAP_DISC_INDICATION);
949 break; 949 break;
950 default: 950 default:
951 IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __FUNCTION__, 951 IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__,
952 event, irlap_event[event]); 952 event, irlap_event[event]);
953 953
954 ret = -1; 954 ret = -1;
@@ -966,7 +966,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
966static int irlap_state_offline(struct irlap_cb *self, IRLAP_EVENT event, 966static int irlap_state_offline(struct irlap_cb *self, IRLAP_EVENT event,
967 struct sk_buff *skb, struct irlap_info *info) 967 struct sk_buff *skb, struct irlap_info *info)
968{ 968{
969 IRDA_DEBUG( 0, "%s(), Unknown event\n", __FUNCTION__); 969 IRDA_DEBUG( 0, "%s(), Unknown event\n", __func__);
970 970
971 return -1; 971 return -1;
972} 972}
@@ -1030,7 +1030,7 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
1030 */ 1030 */
1031 if((!nextfit) && (skb->len > self->bytes_left)) { 1031 if((!nextfit) && (skb->len > self->bytes_left)) {
1032 IRDA_DEBUG(0, "%s(), Not allowed to transmit" 1032 IRDA_DEBUG(0, "%s(), Not allowed to transmit"
1033 " more bytes!\n", __FUNCTION__); 1033 " more bytes!\n", __func__);
1034 /* Requeue the skb */ 1034 /* Requeue the skb */
1035 skb_queue_head(&self->txq, skb_get(skb)); 1035 skb_queue_head(&self->txq, skb_get(skb));
1036 /* 1036 /*
@@ -1082,7 +1082,7 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
1082#endif /* CONFIG_IRDA_FAST_RR */ 1082#endif /* CONFIG_IRDA_FAST_RR */
1083 } else { 1083 } else {
1084 IRDA_DEBUG(4, "%s(), Unable to send! remote busy?\n", 1084 IRDA_DEBUG(4, "%s(), Unable to send! remote busy?\n",
1085 __FUNCTION__); 1085 __func__);
1086 skb_queue_head(&self->txq, skb_get(skb)); 1086 skb_queue_head(&self->txq, skb_get(skb));
1087 1087
1088 /* 1088 /*
@@ -1094,7 +1094,7 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
1094 break; 1094 break;
1095 case POLL_TIMER_EXPIRED: 1095 case POLL_TIMER_EXPIRED:
1096 IRDA_DEBUG(3, "%s(), POLL_TIMER_EXPIRED <%ld>\n", 1096 IRDA_DEBUG(3, "%s(), POLL_TIMER_EXPIRED <%ld>\n",
1097 __FUNCTION__, jiffies); 1097 __func__, jiffies);
1098 irlap_send_rr_frame(self, CMD_FRAME); 1098 irlap_send_rr_frame(self, CMD_FRAME);
1099 /* Return to NRM properly - Jean II */ 1099 /* Return to NRM properly - Jean II */
1100 self->window = self->window_size; 1100 self->window = self->window_size;
@@ -1120,7 +1120,7 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
1120 break; 1120 break;
1121 default: 1121 default:
1122 IRDA_DEBUG(0, "%s(), Unknown event %s\n", 1122 IRDA_DEBUG(0, "%s(), Unknown event %s\n",
1123 __FUNCTION__, irlap_event[event]); 1123 __func__, irlap_event[event]);
1124 1124
1125 ret = -EINVAL; 1125 ret = -EINVAL;
1126 break; 1126 break;
@@ -1138,7 +1138,7 @@ static int irlap_state_pclose(struct irlap_cb *self, IRLAP_EVENT event,
1138{ 1138{
1139 int ret = 0; 1139 int ret = 0;
1140 1140
1141 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 1141 IRDA_DEBUG(1, "%s()\n", __func__);
1142 1142
1143 IRDA_ASSERT(self != NULL, return -1;); 1143 IRDA_ASSERT(self != NULL, return -1;);
1144 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); 1144 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
@@ -1173,7 +1173,7 @@ static int irlap_state_pclose(struct irlap_cb *self, IRLAP_EVENT event,
1173 } 1173 }
1174 break; 1174 break;
1175 default: 1175 default:
1176 IRDA_DEBUG(1, "%s(), Unknown event %d\n", __FUNCTION__, event); 1176 IRDA_DEBUG(1, "%s(), Unknown event %d\n", __func__, event);
1177 1177
1178 ret = -1; 1178 ret = -1;
1179 break; 1179 break;
@@ -1297,7 +1297,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
1297 } else { 1297 } else {
1298 IRDA_DEBUG(4, 1298 IRDA_DEBUG(4,
1299 "%s(), missing or duplicate frame!\n", 1299 "%s(), missing or duplicate frame!\n",
1300 __FUNCTION__); 1300 __func__);
1301 1301
1302 /* Update Nr received */ 1302 /* Update Nr received */
1303 irlap_update_nr_received(self, info->nr); 1303 irlap_update_nr_received(self, info->nr);
@@ -1367,7 +1367,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
1367 (nr_status == NR_UNEXPECTED)) 1367 (nr_status == NR_UNEXPECTED))
1368 { 1368 {
1369 IRDA_DEBUG(4, "%s(), unexpected nr and ns!\n", 1369 IRDA_DEBUG(4, "%s(), unexpected nr and ns!\n",
1370 __FUNCTION__); 1370 __func__);
1371 if (info->pf) { 1371 if (info->pf) {
1372 /* Resend rejected frames */ 1372 /* Resend rejected frames */
1373 irlap_resend_rejected_frames(self, CMD_FRAME); 1373 irlap_resend_rejected_frames(self, CMD_FRAME);
@@ -1407,9 +1407,9 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
1407 } 1407 }
1408 break; 1408 break;
1409 } 1409 }
1410 IRDA_DEBUG(1, "%s(), Not implemented!\n", __FUNCTION__); 1410 IRDA_DEBUG(1, "%s(), Not implemented!\n", __func__);
1411 IRDA_DEBUG(1, "%s(), event=%s, ns_status=%d, nr_status=%d\n", 1411 IRDA_DEBUG(1, "%s(), event=%s, ns_status=%d, nr_status=%d\n",
1412 __FUNCTION__, irlap_event[event], ns_status, nr_status); 1412 __func__, irlap_event[event], ns_status, nr_status);
1413 break; 1413 break;
1414 case RECV_UI_FRAME: 1414 case RECV_UI_FRAME:
1415 /* Poll bit cleared? */ 1415 /* Poll bit cleared? */
@@ -1420,7 +1420,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
1420 del_timer(&self->final_timer); 1420 del_timer(&self->final_timer);
1421 irlap_data_indication(self, skb, TRUE); 1421 irlap_data_indication(self, skb, TRUE);
1422 irlap_next_state(self, LAP_XMIT_P); 1422 irlap_next_state(self, LAP_XMIT_P);
1423 IRDA_DEBUG(1, "%s: RECV_UI_FRAME: next state %s\n", __FUNCTION__, irlap_state[self->state]); 1423 IRDA_DEBUG(1, "%s: RECV_UI_FRAME: next state %s\n", __func__, irlap_state[self->state]);
1424 irlap_start_poll_timer(self, self->poll_timeout); 1424 irlap_start_poll_timer(self, self->poll_timeout);
1425 } 1425 }
1426 break; 1426 break;
@@ -1475,7 +1475,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
1475 irlap_next_state(self, LAP_NRM_P); 1475 irlap_next_state(self, LAP_NRM_P);
1476 } else if (ret == NR_INVALID) { 1476 } else if (ret == NR_INVALID) {
1477 IRDA_DEBUG(1, "%s(), Received RR with " 1477 IRDA_DEBUG(1, "%s(), Received RR with "
1478 "invalid nr !\n", __FUNCTION__); 1478 "invalid nr !\n", __func__);
1479 1479
1480 irlap_next_state(self, LAP_RESET_WAIT); 1480 irlap_next_state(self, LAP_RESET_WAIT);
1481 1481
@@ -1580,7 +1580,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
1580 irlap_start_final_timer(self, 2 * self->final_timeout); 1580 irlap_start_final_timer(self, 2 * self->final_timeout);
1581 break; 1581 break;
1582 case RECV_RD_RSP: 1582 case RECV_RD_RSP:
1583 IRDA_DEBUG(1, "%s(), RECV_RD_RSP\n", __FUNCTION__); 1583 IRDA_DEBUG(1, "%s(), RECV_RD_RSP\n", __func__);
1584 1584
1585 irlap_flush_all_queues(self); 1585 irlap_flush_all_queues(self);
1586 irlap_next_state(self, LAP_XMIT_P); 1586 irlap_next_state(self, LAP_XMIT_P);
@@ -1589,7 +1589,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
1589 break; 1589 break;
1590 default: 1590 default:
1591 IRDA_DEBUG(1, "%s(), Unknown event %s\n", 1591 IRDA_DEBUG(1, "%s(), Unknown event %s\n",
1592 __FUNCTION__, irlap_event[event]); 1592 __func__, irlap_event[event]);
1593 1593
1594 ret = -1; 1594 ret = -1;
1595 break; 1595 break;
@@ -1609,7 +1609,7 @@ static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event,
1609{ 1609{
1610 int ret = 0; 1610 int ret = 0;
1611 1611
1612 IRDA_DEBUG(3, "%s(), event = %s\n", __FUNCTION__, irlap_event[event]); 1612 IRDA_DEBUG(3, "%s(), event = %s\n", __func__, irlap_event[event]);
1613 1613
1614 IRDA_ASSERT(self != NULL, return -1;); 1614 IRDA_ASSERT(self != NULL, return -1;);
1615 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); 1615 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
@@ -1635,7 +1635,7 @@ static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event,
1635 irlap_next_state( self, LAP_PCLOSE); 1635 irlap_next_state( self, LAP_PCLOSE);
1636 break; 1636 break;
1637 default: 1637 default:
1638 IRDA_DEBUG(2, "%s(), Unknown event %s\n", __FUNCTION__, 1638 IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__,
1639 irlap_event[event]); 1639 irlap_event[event]);
1640 1640
1641 ret = -1; 1641 ret = -1;
@@ -1656,7 +1656,7 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event,
1656{ 1656{
1657 int ret = 0; 1657 int ret = 0;
1658 1658
1659 IRDA_DEBUG(3, "%s(), event = %s\n", __FUNCTION__, irlap_event[event]); 1659 IRDA_DEBUG(3, "%s(), event = %s\n", __func__, irlap_event[event]);
1660 1660
1661 IRDA_ASSERT(self != NULL, return -1;); 1661 IRDA_ASSERT(self != NULL, return -1;);
1662 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); 1662 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
@@ -1714,7 +1714,7 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event,
1714 * state 1714 * state
1715 */ 1715 */
1716 if (!info) { 1716 if (!info) {
1717 IRDA_DEBUG(3, "%s(), RECV_SNRM_CMD\n", __FUNCTION__); 1717 IRDA_DEBUG(3, "%s(), RECV_SNRM_CMD\n", __func__);
1718 irlap_initiate_connection_state(self); 1718 irlap_initiate_connection_state(self);
1719 irlap_wait_min_turn_around(self, &self->qos_tx); 1719 irlap_wait_min_turn_around(self, &self->qos_tx);
1720 irlap_send_ua_response_frame(self, &self->qos_rx); 1720 irlap_send_ua_response_frame(self, &self->qos_rx);
@@ -1724,12 +1724,12 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event,
1724 } else { 1724 } else {
1725 IRDA_DEBUG(0, 1725 IRDA_DEBUG(0,
1726 "%s(), SNRM frame contained an I field!\n", 1726 "%s(), SNRM frame contained an I field!\n",
1727 __FUNCTION__); 1727 __func__);
1728 } 1728 }
1729 break; 1729 break;
1730 default: 1730 default:
1731 IRDA_DEBUG(1, "%s(), Unknown event %s\n", 1731 IRDA_DEBUG(1, "%s(), Unknown event %s\n",
1732 __FUNCTION__, irlap_event[event]); 1732 __func__, irlap_event[event]);
1733 1733
1734 ret = -1; 1734 ret = -1;
1735 break; 1735 break;
@@ -1749,7 +1749,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
1749{ 1749{
1750 int ret = 0; 1750 int ret = 0;
1751 1751
1752 IRDA_DEBUG(4, "%s(), event=%s\n", __FUNCTION__, irlap_event[event]); 1752 IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[event]);
1753 1753
1754 IRDA_ASSERT(self != NULL, return -ENODEV;); 1754 IRDA_ASSERT(self != NULL, return -ENODEV;);
1755 IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;); 1755 IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;);
@@ -1786,7 +1786,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
1786 */ 1786 */
1787 if((!nextfit) && (skb->len > self->bytes_left)) { 1787 if((!nextfit) && (skb->len > self->bytes_left)) {
1788 IRDA_DEBUG(0, "%s(), Not allowed to transmit" 1788 IRDA_DEBUG(0, "%s(), Not allowed to transmit"
1789 " more bytes!\n", __FUNCTION__); 1789 " more bytes!\n", __func__);
1790 /* Requeue the skb */ 1790 /* Requeue the skb */
1791 skb_queue_head(&self->txq, skb_get(skb)); 1791 skb_queue_head(&self->txq, skb_get(skb));
1792 1792
@@ -1832,7 +1832,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
1832 ret = -EPROTO; 1832 ret = -EPROTO;
1833 } 1833 }
1834 } else { 1834 } else {
1835 IRDA_DEBUG(2, "%s(), Unable to send!\n", __FUNCTION__); 1835 IRDA_DEBUG(2, "%s(), Unable to send!\n", __func__);
1836 skb_queue_head(&self->txq, skb_get(skb)); 1836 skb_queue_head(&self->txq, skb_get(skb));
1837 ret = -EPROTO; 1837 ret = -EPROTO;
1838 } 1838 }
@@ -1848,7 +1848,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
1848 * when we return... - Jean II */ 1848 * when we return... - Jean II */
1849 break; 1849 break;
1850 default: 1850 default:
1851 IRDA_DEBUG(2, "%s(), Unknown event %s\n", __FUNCTION__, 1851 IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__,
1852 irlap_event[event]); 1852 irlap_event[event]);
1853 1853
1854 ret = -EINVAL; 1854 ret = -EINVAL;
@@ -1871,7 +1871,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
1871 int nr_status; 1871 int nr_status;
1872 int ret = 0; 1872 int ret = 0;
1873 1873
1874 IRDA_DEBUG(4, "%s(), event=%s\n", __FUNCTION__, irlap_event[ event]); 1874 IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[ event]);
1875 1875
1876 IRDA_ASSERT(self != NULL, return -1;); 1876 IRDA_ASSERT(self != NULL, return -1;);
1877 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); 1877 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
@@ -1880,7 +1880,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
1880 case RECV_I_CMD: /* Optimize for the common case */ 1880 case RECV_I_CMD: /* Optimize for the common case */
1881 /* FIXME: must check for remote_busy below */ 1881 /* FIXME: must check for remote_busy below */
1882 IRDA_DEBUG(4, "%s(), event=%s nr=%d, vs=%d, ns=%d, " 1882 IRDA_DEBUG(4, "%s(), event=%s nr=%d, vs=%d, ns=%d, "
1883 "vr=%d, pf=%d\n", __FUNCTION__, 1883 "vr=%d, pf=%d\n", __func__,
1884 irlap_event[event], info->nr, 1884 irlap_event[event], info->nr,
1885 self->vs, info->ns, self->vr, info->pf); 1885 self->vs, info->ns, self->vr, info->pf);
1886 1886
@@ -2112,21 +2112,21 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
2112 irlap_next_state(self, LAP_NRM_S); 2112 irlap_next_state(self, LAP_NRM_S);
2113 } else { 2113 } else {
2114 IRDA_DEBUG(1, "%s(), invalid nr not implemented!\n", 2114 IRDA_DEBUG(1, "%s(), invalid nr not implemented!\n",
2115 __FUNCTION__); 2115 __func__);
2116 } 2116 }
2117 break; 2117 break;
2118 case RECV_SNRM_CMD: 2118 case RECV_SNRM_CMD:
2119 /* SNRM frame is not allowed to contain an I-field */ 2119 /* SNRM frame is not allowed to contain an I-field */
2120 if (!info) { 2120 if (!info) {
2121 del_timer(&self->wd_timer); 2121 del_timer(&self->wd_timer);
2122 IRDA_DEBUG(1, "%s(), received SNRM cmd\n", __FUNCTION__); 2122 IRDA_DEBUG(1, "%s(), received SNRM cmd\n", __func__);
2123 irlap_next_state(self, LAP_RESET_CHECK); 2123 irlap_next_state(self, LAP_RESET_CHECK);
2124 2124
2125 irlap_reset_indication(self); 2125 irlap_reset_indication(self);
2126 } else { 2126 } else {
2127 IRDA_DEBUG(0, 2127 IRDA_DEBUG(0,
2128 "%s(), SNRM frame contained an I-field!\n", 2128 "%s(), SNRM frame contained an I-field!\n",
2129 __FUNCTION__); 2129 __func__);
2130 2130
2131 } 2131 }
2132 break; 2132 break;
@@ -2158,7 +2158,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
2158 * which explain why we use (self->N2 / 2) here !!! 2158 * which explain why we use (self->N2 / 2) here !!!
2159 * Jean II 2159 * Jean II
2160 */ 2160 */
2161 IRDA_DEBUG(1, "%s(), retry_count = %d\n", __FUNCTION__, 2161 IRDA_DEBUG(1, "%s(), retry_count = %d\n", __func__,
2162 self->retry_count); 2162 self->retry_count);
2163 2163
2164 if (self->retry_count < (self->N2 / 2)) { 2164 if (self->retry_count < (self->N2 / 2)) {
@@ -2211,7 +2211,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
2211 irlap_send_test_frame(self, self->caddr, info->daddr, skb); 2211 irlap_send_test_frame(self, self->caddr, info->daddr, skb);
2212 break; 2212 break;
2213 default: 2213 default:
2214 IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __FUNCTION__, 2214 IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__,
2215 event, irlap_event[event]); 2215 event, irlap_event[event]);
2216 2216
2217 ret = -EINVAL; 2217 ret = -EINVAL;
@@ -2228,7 +2228,7 @@ static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event,
2228{ 2228{
2229 int ret = 0; 2229 int ret = 0;
2230 2230
2231 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 2231 IRDA_DEBUG(1, "%s()\n", __func__);
2232 2232
2233 IRDA_ASSERT(self != NULL, return -ENODEV;); 2233 IRDA_ASSERT(self != NULL, return -ENODEV;);
2234 IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;); 2234 IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;);
@@ -2285,7 +2285,7 @@ static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event,
2285 break; /* stay in SCLOSE */ 2285 break; /* stay in SCLOSE */
2286 } 2286 }
2287 2287
2288 IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __FUNCTION__, 2288 IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__,
2289 event, irlap_event[event]); 2289 event, irlap_event[event]);
2290 2290
2291 ret = -EINVAL; 2291 ret = -EINVAL;
@@ -2301,7 +2301,7 @@ static int irlap_state_reset_check( struct irlap_cb *self, IRLAP_EVENT event,
2301{ 2301{
2302 int ret = 0; 2302 int ret = 0;
2303 2303
2304 IRDA_DEBUG(1, "%s(), event=%s\n", __FUNCTION__, irlap_event[event]); 2304 IRDA_DEBUG(1, "%s(), event=%s\n", __func__, irlap_event[event]);
2305 2305
2306 IRDA_ASSERT(self != NULL, return -ENODEV;); 2306 IRDA_ASSERT(self != NULL, return -ENODEV;);
2307 IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;); 2307 IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;);
@@ -2322,7 +2322,7 @@ static int irlap_state_reset_check( struct irlap_cb *self, IRLAP_EVENT event,
2322 irlap_next_state(self, LAP_SCLOSE); 2322 irlap_next_state(self, LAP_SCLOSE);
2323 break; 2323 break;
2324 default: 2324 default:
2325 IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __FUNCTION__, 2325 IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__,
2326 event, irlap_event[event]); 2326 event, irlap_event[event]);
2327 2327
2328 ret = -EINVAL; 2328 ret = -EINVAL;
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 7c132d6342af..90894534f3cc 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -102,7 +102,7 @@ void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb)
102 irlap_insert_info(self, skb); 102 irlap_insert_info(self, skb);
103 103
104 if (unlikely(self->mode & IRDA_MODE_MONITOR)) { 104 if (unlikely(self->mode & IRDA_MODE_MONITOR)) {
105 IRDA_DEBUG(3, "%s(): %s is in monitor mode\n", __FUNCTION__, 105 IRDA_DEBUG(3, "%s(): %s is in monitor mode\n", __func__,
106 self->netdev->name); 106 self->netdev->name);
107 dev_kfree_skb(skb); 107 dev_kfree_skb(skb);
108 return; 108 return;
@@ -182,7 +182,7 @@ static void irlap_recv_snrm_cmd(struct irlap_cb *self, struct sk_buff *skb,
182 /* Check if the new connection address is valid */ 182 /* Check if the new connection address is valid */
183 if ((info->caddr == 0x00) || (info->caddr == 0xfe)) { 183 if ((info->caddr == 0x00) || (info->caddr == 0xfe)) {
184 IRDA_DEBUG(3, "%s(), invalid connection address!\n", 184 IRDA_DEBUG(3, "%s(), invalid connection address!\n",
185 __FUNCTION__); 185 __func__);
186 return; 186 return;
187 } 187 }
188 188
@@ -193,7 +193,7 @@ static void irlap_recv_snrm_cmd(struct irlap_cb *self, struct sk_buff *skb,
193 /* Only accept if addressed directly to us */ 193 /* Only accept if addressed directly to us */
194 if (info->saddr != self->saddr) { 194 if (info->saddr != self->saddr) {
195 IRDA_DEBUG(2, "%s(), not addressed to us!\n", 195 IRDA_DEBUG(2, "%s(), not addressed to us!\n",
196 __FUNCTION__); 196 __func__);
197 return; 197 return;
198 } 198 }
199 irlap_do_event(self, RECV_SNRM_CMD, skb, info); 199 irlap_do_event(self, RECV_SNRM_CMD, skb, info);
@@ -215,7 +215,7 @@ void irlap_send_ua_response_frame(struct irlap_cb *self, struct qos_info *qos)
215 struct ua_frame *frame; 215 struct ua_frame *frame;
216 int ret; 216 int ret;
217 217
218 IRDA_DEBUG(2, "%s() <%ld>\n", __FUNCTION__, jiffies); 218 IRDA_DEBUG(2, "%s() <%ld>\n", __func__, jiffies);
219 219
220 IRDA_ASSERT(self != NULL, return;); 220 IRDA_ASSERT(self != NULL, return;);
221 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 221 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -290,7 +290,7 @@ void irlap_send_disc_frame(struct irlap_cb *self)
290 struct sk_buff *tx_skb = NULL; 290 struct sk_buff *tx_skb = NULL;
291 struct disc_frame *frame; 291 struct disc_frame *frame;
292 292
293 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 293 IRDA_DEBUG(3, "%s()\n", __func__);
294 294
295 IRDA_ASSERT(self != NULL, return;); 295 IRDA_ASSERT(self != NULL, return;);
296 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 296 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -321,7 +321,7 @@ void irlap_send_discovery_xid_frame(struct irlap_cb *self, int S, __u8 s,
321 __u32 bcast = BROADCAST; 321 __u32 bcast = BROADCAST;
322 __u8 *info; 322 __u8 *info;
323 323
324 IRDA_DEBUG(4, "%s(), s=%d, S=%d, command=%d\n", __FUNCTION__, 324 IRDA_DEBUG(4, "%s(), s=%d, S=%d, command=%d\n", __func__,
325 s, S, command); 325 s, S, command);
326 326
327 IRDA_ASSERT(self != NULL, return;); 327 IRDA_ASSERT(self != NULL, return;);
@@ -414,13 +414,13 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self,
414 __u8 *discovery_info; 414 __u8 *discovery_info;
415 char *text; 415 char *text;
416 416
417 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 417 IRDA_DEBUG(4, "%s()\n", __func__);
418 418
419 IRDA_ASSERT(self != NULL, return;); 419 IRDA_ASSERT(self != NULL, return;);
420 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 420 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
421 421
422 if (!pskb_may_pull(skb, sizeof(struct xid_frame))) { 422 if (!pskb_may_pull(skb, sizeof(struct xid_frame))) {
423 IRDA_ERROR("%s: frame too short!\n", __FUNCTION__); 423 IRDA_ERROR("%s: frame too short!\n", __func__);
424 return; 424 return;
425 } 425 }
426 426
@@ -432,12 +432,12 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self,
432 /* Make sure frame is addressed to us */ 432 /* Make sure frame is addressed to us */
433 if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) { 433 if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) {
434 IRDA_DEBUG(0, "%s(), frame is not addressed to us!\n", 434 IRDA_DEBUG(0, "%s(), frame is not addressed to us!\n",
435 __FUNCTION__); 435 __func__);
436 return; 436 return;
437 } 437 }
438 438
439 if ((discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) { 439 if ((discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) {
440 IRDA_WARNING("%s: kmalloc failed!\n", __FUNCTION__); 440 IRDA_WARNING("%s: kmalloc failed!\n", __func__);
441 return; 441 return;
442 } 442 }
443 443
@@ -445,7 +445,7 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self,
445 discovery->data.saddr = self->saddr; 445 discovery->data.saddr = self->saddr;
446 discovery->timestamp = jiffies; 446 discovery->timestamp = jiffies;
447 447
448 IRDA_DEBUG(4, "%s(), daddr=%08x\n", __FUNCTION__, 448 IRDA_DEBUG(4, "%s(), daddr=%08x\n", __func__,
449 discovery->data.daddr); 449 discovery->data.daddr);
450 450
451 discovery_info = skb_pull(skb, sizeof(struct xid_frame)); 451 discovery_info = skb_pull(skb, sizeof(struct xid_frame));
@@ -491,7 +491,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
491 char *text; 491 char *text;
492 492
493 if (!pskb_may_pull(skb, sizeof(struct xid_frame))) { 493 if (!pskb_may_pull(skb, sizeof(struct xid_frame))) {
494 IRDA_ERROR("%s: frame too short!\n", __FUNCTION__); 494 IRDA_ERROR("%s: frame too short!\n", __func__);
495 return; 495 return;
496 } 496 }
497 497
@@ -503,7 +503,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
503 /* Make sure frame is addressed to us */ 503 /* Make sure frame is addressed to us */
504 if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) { 504 if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) {
505 IRDA_DEBUG(0, "%s(), frame is not addressed to us!\n", 505 IRDA_DEBUG(0, "%s(), frame is not addressed to us!\n",
506 __FUNCTION__); 506 __func__);
507 return; 507 return;
508 } 508 }
509 509
@@ -536,7 +536,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
536 if((discovery_info == NULL) || 536 if((discovery_info == NULL) ||
537 !pskb_may_pull(skb, 3)) { 537 !pskb_may_pull(skb, 3)) {
538 IRDA_ERROR("%s: discovery frame too short!\n", 538 IRDA_ERROR("%s: discovery frame too short!\n",
539 __FUNCTION__); 539 __func__);
540 return; 540 return;
541 } 541 }
542 542
@@ -545,7 +545,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
545 */ 545 */
546 discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC); 546 discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC);
547 if (!discovery) { 547 if (!discovery) {
548 IRDA_WARNING("%s: unable to malloc!\n", __FUNCTION__); 548 IRDA_WARNING("%s: unable to malloc!\n", __func__);
549 return; 549 return;
550 } 550 }
551 551
@@ -657,7 +657,7 @@ static void irlap_recv_rnr_frame(struct irlap_cb *self, struct sk_buff *skb,
657{ 657{
658 info->nr = skb->data[1] >> 5; 658 info->nr = skb->data[1] >> 5;
659 659
660 IRDA_DEBUG(4, "%s(), nr=%d, %ld\n", __FUNCTION__, info->nr, jiffies); 660 IRDA_DEBUG(4, "%s(), nr=%d, %ld\n", __func__, info->nr, jiffies);
661 661
662 if (command) 662 if (command)
663 irlap_do_event(self, RECV_RNR_CMD, skb, info); 663 irlap_do_event(self, RECV_RNR_CMD, skb, info);
@@ -668,7 +668,7 @@ static void irlap_recv_rnr_frame(struct irlap_cb *self, struct sk_buff *skb,
668static void irlap_recv_rej_frame(struct irlap_cb *self, struct sk_buff *skb, 668static void irlap_recv_rej_frame(struct irlap_cb *self, struct sk_buff *skb,
669 struct irlap_info *info, int command) 669 struct irlap_info *info, int command)
670{ 670{
671 IRDA_DEBUG(0, "%s()\n", __FUNCTION__); 671 IRDA_DEBUG(0, "%s()\n", __func__);
672 672
673 info->nr = skb->data[1] >> 5; 673 info->nr = skb->data[1] >> 5;
674 674
@@ -682,7 +682,7 @@ static void irlap_recv_rej_frame(struct irlap_cb *self, struct sk_buff *skb,
682static void irlap_recv_srej_frame(struct irlap_cb *self, struct sk_buff *skb, 682static void irlap_recv_srej_frame(struct irlap_cb *self, struct sk_buff *skb,
683 struct irlap_info *info, int command) 683 struct irlap_info *info, int command)
684{ 684{
685 IRDA_DEBUG(0, "%s()\n", __FUNCTION__); 685 IRDA_DEBUG(0, "%s()\n", __func__);
686 686
687 info->nr = skb->data[1] >> 5; 687 info->nr = skb->data[1] >> 5;
688 688
@@ -696,7 +696,7 @@ static void irlap_recv_srej_frame(struct irlap_cb *self, struct sk_buff *skb,
696static void irlap_recv_disc_frame(struct irlap_cb *self, struct sk_buff *skb, 696static void irlap_recv_disc_frame(struct irlap_cb *self, struct sk_buff *skb,
697 struct irlap_info *info, int command) 697 struct irlap_info *info, int command)
698{ 698{
699 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 699 IRDA_DEBUG(2, "%s()\n", __func__);
700 700
701 /* Check if this is a command or a response frame */ 701 /* Check if this is a command or a response frame */
702 if (command) 702 if (command)
@@ -755,7 +755,7 @@ void irlap_send_data_primary(struct irlap_cb *self, struct sk_buff *skb)
755 755
756 irlap_send_i_frame( self, tx_skb, CMD_FRAME); 756 irlap_send_i_frame( self, tx_skb, CMD_FRAME);
757 } else { 757 } else {
758 IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __FUNCTION__); 758 IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __func__);
759 irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); 759 irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME);
760 self->window -= 1; 760 self->window -= 1;
761 } 761 }
@@ -808,7 +808,7 @@ void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb)
808 irlap_next_state(self, LAP_NRM_P); 808 irlap_next_state(self, LAP_NRM_P);
809 irlap_send_i_frame(self, tx_skb, CMD_FRAME); 809 irlap_send_i_frame(self, tx_skb, CMD_FRAME);
810 } else { 810 } else {
811 IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __FUNCTION__); 811 IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __func__);
812 812
813 if (self->ack_required) { 813 if (self->ack_required) {
814 irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); 814 irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME);
@@ -835,7 +835,7 @@ void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb)
835 * See max_line_capacities[][] in qos.c for details. Jean II */ 835 * See max_line_capacities[][] in qos.c for details. Jean II */
836 transmission_time -= (self->final_timeout * self->bytes_left 836 transmission_time -= (self->final_timeout * self->bytes_left
837 / self->line_capacity); 837 / self->line_capacity);
838 IRDA_DEBUG(4, "%s() adjusting transmission_time : ft=%d, bl=%d, lc=%d -> tt=%d\n", __FUNCTION__, self->final_timeout, self->bytes_left, self->line_capacity, transmission_time); 838 IRDA_DEBUG(4, "%s() adjusting transmission_time : ft=%d, bl=%d, lc=%d -> tt=%d\n", __func__, self->final_timeout, self->bytes_left, self->line_capacity, transmission_time);
839 839
840 /* We are allowed to transmit a maximum number of bytes again. */ 840 /* We are allowed to transmit a maximum number of bytes again. */
841 self->bytes_left = self->line_capacity; 841 self->bytes_left = self->line_capacity;
@@ -1001,7 +1001,7 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
1001 /* tx_skb = skb_clone( skb, GFP_ATOMIC); */ 1001 /* tx_skb = skb_clone( skb, GFP_ATOMIC); */
1002 tx_skb = skb_copy(skb, GFP_ATOMIC); 1002 tx_skb = skb_copy(skb, GFP_ATOMIC);
1003 if (!tx_skb) { 1003 if (!tx_skb) {
1004 IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__); 1004 IRDA_DEBUG(0, "%s(), unable to copy\n", __func__);
1005 return; 1005 return;
1006 } 1006 }
1007 1007
@@ -1033,7 +1033,7 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
1033 */ 1033 */
1034 while (!skb_queue_empty(&self->txq)) { 1034 while (!skb_queue_empty(&self->txq)) {
1035 1035
1036 IRDA_DEBUG(0, "%s(), sending additional frames!\n", __FUNCTION__); 1036 IRDA_DEBUG(0, "%s(), sending additional frames!\n", __func__);
1037 if (self->window > 0) { 1037 if (self->window > 0) {
1038 skb = skb_dequeue( &self->txq); 1038 skb = skb_dequeue( &self->txq);
1039 IRDA_ASSERT(skb != NULL, return;); 1039 IRDA_ASSERT(skb != NULL, return;);
@@ -1073,7 +1073,7 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command)
1073 /* tx_skb = skb_clone( skb, GFP_ATOMIC); */ 1073 /* tx_skb = skb_clone( skb, GFP_ATOMIC); */
1074 tx_skb = skb_copy(skb, GFP_ATOMIC); 1074 tx_skb = skb_copy(skb, GFP_ATOMIC);
1075 if (!tx_skb) { 1075 if (!tx_skb) {
1076 IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__); 1076 IRDA_DEBUG(0, "%s(), unable to copy\n", __func__);
1077 return; 1077 return;
1078 } 1078 }
1079 1079
@@ -1096,7 +1096,7 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command)
1096void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb, 1096void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb,
1097 __u8 caddr, int command) 1097 __u8 caddr, int command)
1098{ 1098{
1099 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 1099 IRDA_DEBUG(4, "%s()\n", __func__);
1100 1100
1101 IRDA_ASSERT(self != NULL, return;); 1101 IRDA_ASSERT(self != NULL, return;);
1102 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 1102 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -1156,7 +1156,7 @@ static inline void irlap_recv_i_frame(struct irlap_cb *self,
1156static void irlap_recv_ui_frame(struct irlap_cb *self, struct sk_buff *skb, 1156static void irlap_recv_ui_frame(struct irlap_cb *self, struct sk_buff *skb,
1157 struct irlap_info *info) 1157 struct irlap_info *info)
1158{ 1158{
1159 IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); 1159 IRDA_DEBUG( 4, "%s()\n", __func__);
1160 1160
1161 info->pf = skb->data[1] & PF_BIT; /* Final bit */ 1161 info->pf = skb->data[1] & PF_BIT; /* Final bit */
1162 1162
@@ -1175,7 +1175,7 @@ static void irlap_recv_frmr_frame(struct irlap_cb *self, struct sk_buff *skb,
1175 __u8 *frame; 1175 __u8 *frame;
1176 int w, x, y, z; 1176 int w, x, y, z;
1177 1177
1178 IRDA_DEBUG(0, "%s()\n", __FUNCTION__); 1178 IRDA_DEBUG(0, "%s()\n", __func__);
1179 1179
1180 IRDA_ASSERT(self != NULL, return;); 1180 IRDA_ASSERT(self != NULL, return;);
1181 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 1181 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -1183,7 +1183,7 @@ static void irlap_recv_frmr_frame(struct irlap_cb *self, struct sk_buff *skb,
1183 IRDA_ASSERT(info != NULL, return;); 1183 IRDA_ASSERT(info != NULL, return;);
1184 1184
1185 if (!pskb_may_pull(skb, 4)) { 1185 if (!pskb_may_pull(skb, 4)) {
1186 IRDA_ERROR("%s: frame too short!\n", __FUNCTION__); 1186 IRDA_ERROR("%s: frame too short!\n", __func__);
1187 return; 1187 return;
1188 } 1188 }
1189 1189
@@ -1269,10 +1269,10 @@ static void irlap_recv_test_frame(struct irlap_cb *self, struct sk_buff *skb,
1269{ 1269{
1270 struct test_frame *frame; 1270 struct test_frame *frame;
1271 1271
1272 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 1272 IRDA_DEBUG(2, "%s()\n", __func__);
1273 1273
1274 if (!pskb_may_pull(skb, sizeof(*frame))) { 1274 if (!pskb_may_pull(skb, sizeof(*frame))) {
1275 IRDA_ERROR("%s: frame too short!\n", __FUNCTION__); 1275 IRDA_ERROR("%s: frame too short!\n", __func__);
1276 return; 1276 return;
1277 } 1277 }
1278 frame = (struct test_frame *) skb->data; 1278 frame = (struct test_frame *) skb->data;
@@ -1281,7 +1281,7 @@ static void irlap_recv_test_frame(struct irlap_cb *self, struct sk_buff *skb,
1281 if (info->caddr == CBROADCAST) { 1281 if (info->caddr == CBROADCAST) {
1282 if (skb->len < sizeof(struct test_frame)) { 1282 if (skb->len < sizeof(struct test_frame)) {
1283 IRDA_DEBUG(0, "%s() test frame too short!\n", 1283 IRDA_DEBUG(0, "%s() test frame too short!\n",
1284 __FUNCTION__); 1284 __func__);
1285 return; 1285 return;
1286 } 1286 }
1287 1287
@@ -1326,7 +1326,7 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
1326 int command; 1326 int command;
1327 __u8 control; 1327 __u8 control;
1328 1328
1329 if (dev->nd_net != &init_net) 1329 if (dev_net(dev) != &init_net)
1330 goto out; 1330 goto out;
1331 1331
1332 /* FIXME: should we get our own field? */ 1332 /* FIXME: should we get our own field? */
@@ -1342,14 +1342,14 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
1342 * share and non linear skbs. This should never happen, so 1342 * share and non linear skbs. This should never happen, so
1343 * we don't need to be clever about it. Jean II */ 1343 * we don't need to be clever about it. Jean II */
1344 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { 1344 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
1345 IRDA_ERROR("%s: can't clone shared skb!\n", __FUNCTION__); 1345 IRDA_ERROR("%s: can't clone shared skb!\n", __func__);
1346 dev_kfree_skb(skb); 1346 dev_kfree_skb(skb);
1347 return -1; 1347 return -1;
1348 } 1348 }
1349 1349
1350 /* Check if frame is large enough for parsing */ 1350 /* Check if frame is large enough for parsing */
1351 if (!pskb_may_pull(skb, 2)) { 1351 if (!pskb_may_pull(skb, 2)) {
1352 IRDA_ERROR("%s: frame too short!\n", __FUNCTION__); 1352 IRDA_ERROR("%s: frame too short!\n", __func__);
1353 dev_kfree_skb(skb); 1353 dev_kfree_skb(skb);
1354 return -1; 1354 return -1;
1355 } 1355 }
@@ -1365,7 +1365,7 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
1365 /* First we check if this frame has a valid connection address */ 1365 /* First we check if this frame has a valid connection address */
1366 if ((info.caddr != self->caddr) && (info.caddr != CBROADCAST)) { 1366 if ((info.caddr != self->caddr) && (info.caddr != CBROADCAST)) {
1367 IRDA_DEBUG(0, "%s(), wrong connection address!\n", 1367 IRDA_DEBUG(0, "%s(), wrong connection address!\n",
1368 __FUNCTION__); 1368 __func__);
1369 goto out; 1369 goto out;
1370 } 1370 }
1371 /* 1371 /*
@@ -1400,7 +1400,7 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
1400 break; 1400 break;
1401 default: 1401 default:
1402 IRDA_WARNING("%s: Unknown S-frame %02x received!\n", 1402 IRDA_WARNING("%s: Unknown S-frame %02x received!\n",
1403 __FUNCTION__, info.control); 1403 __func__, info.control);
1404 break; 1404 break;
1405 } 1405 }
1406 goto out; 1406 goto out;
@@ -1438,7 +1438,7 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
1438 break; 1438 break;
1439 default: 1439 default:
1440 IRDA_WARNING("%s: Unknown frame %02x received!\n", 1440 IRDA_WARNING("%s: Unknown frame %02x received!\n",
1441 __FUNCTION__, info.control); 1441 __func__, info.control);
1442 break; 1442 break;
1443 } 1443 }
1444out: 1444out:
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 135ac6907bbf..1f81f8e7c61d 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -76,7 +76,7 @@ const char *irlmp_reasons[] = {
76 */ 76 */
77int __init irlmp_init(void) 77int __init irlmp_init(void)
78{ 78{
79 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 79 IRDA_DEBUG(1, "%s()\n", __func__);
80 /* Initialize the irlmp structure. */ 80 /* Initialize the irlmp structure. */
81 irlmp = kzalloc( sizeof(struct irlmp_cb), GFP_KERNEL); 81 irlmp = kzalloc( sizeof(struct irlmp_cb), GFP_KERNEL);
82 if (irlmp == NULL) 82 if (irlmp == NULL)
@@ -164,7 +164,7 @@ struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, notify_t *notify, __u8 pid)
164 /* Allocate new instance of a LSAP connection */ 164 /* Allocate new instance of a LSAP connection */
165 self = kzalloc(sizeof(struct lsap_cb), GFP_ATOMIC); 165 self = kzalloc(sizeof(struct lsap_cb), GFP_ATOMIC);
166 if (self == NULL) { 166 if (self == NULL) {
167 IRDA_ERROR("%s: can't allocate memory\n", __FUNCTION__); 167 IRDA_ERROR("%s: can't allocate memory\n", __func__);
168 return NULL; 168 return NULL;
169 } 169 }
170 170
@@ -202,7 +202,7 @@ EXPORT_SYMBOL(irlmp_open_lsap);
202 */ 202 */
203static void __irlmp_close_lsap(struct lsap_cb *self) 203static void __irlmp_close_lsap(struct lsap_cb *self)
204{ 204{
205 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 205 IRDA_DEBUG(4, "%s()\n", __func__);
206 206
207 IRDA_ASSERT(self != NULL, return;); 207 IRDA_ASSERT(self != NULL, return;);
208 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); 208 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
@@ -264,7 +264,7 @@ void irlmp_close_lsap(struct lsap_cb *self)
264 if (!lsap) { 264 if (!lsap) {
265 IRDA_DEBUG(0, 265 IRDA_DEBUG(0,
266 "%s(), Looks like somebody has removed me already!\n", 266 "%s(), Looks like somebody has removed me already!\n",
267 __FUNCTION__); 267 __func__);
268 return; 268 return;
269 } 269 }
270 __irlmp_close_lsap(self); 270 __irlmp_close_lsap(self);
@@ -291,7 +291,7 @@ void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr, notify_t *notify)
291 */ 291 */
292 lap = kzalloc(sizeof(struct lap_cb), GFP_KERNEL); 292 lap = kzalloc(sizeof(struct lap_cb), GFP_KERNEL);
293 if (lap == NULL) { 293 if (lap == NULL) {
294 IRDA_ERROR("%s: unable to kmalloc\n", __FUNCTION__); 294 IRDA_ERROR("%s: unable to kmalloc\n", __func__);
295 return; 295 return;
296 } 296 }
297 297
@@ -304,7 +304,7 @@ void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr, notify_t *notify)
304#endif 304#endif
305 lap->lsaps = hashbin_new(HB_LOCK); 305 lap->lsaps = hashbin_new(HB_LOCK);
306 if (lap->lsaps == NULL) { 306 if (lap->lsaps == NULL) {
307 IRDA_WARNING("%s(), unable to kmalloc lsaps\n", __FUNCTION__); 307 IRDA_WARNING("%s(), unable to kmalloc lsaps\n", __func__);
308 kfree(lap); 308 kfree(lap);
309 return; 309 return;
310 } 310 }
@@ -336,7 +336,7 @@ void irlmp_unregister_link(__u32 saddr)
336{ 336{
337 struct lap_cb *link; 337 struct lap_cb *link;
338 338
339 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 339 IRDA_DEBUG(4, "%s()\n", __func__);
340 340
341 /* We must remove ourselves from the hashbin *first*. This ensure 341 /* We must remove ourselves from the hashbin *first*. This ensure
342 * that no more LSAPs will be open on this link and no discovery 342 * that no more LSAPs will be open on this link and no discovery
@@ -381,7 +381,7 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
381 381
382 IRDA_DEBUG(2, 382 IRDA_DEBUG(2,
383 "%s(), slsap_sel=%02x, dlsap_sel=%02x, saddr=%08x, daddr=%08x\n", 383 "%s(), slsap_sel=%02x, dlsap_sel=%02x, saddr=%08x, daddr=%08x\n",
384 __FUNCTION__, self->slsap_sel, dlsap_sel, saddr, daddr); 384 __func__, self->slsap_sel, dlsap_sel, saddr, daddr);
385 385
386 if (test_bit(0, &self->connected)) { 386 if (test_bit(0, &self->connected)) {
387 ret = -EISCONN; 387 ret = -EISCONN;
@@ -425,7 +425,7 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
425 if (daddr != DEV_ADDR_ANY) 425 if (daddr != DEV_ADDR_ANY)
426 discovery = hashbin_find(irlmp->cachelog, daddr, NULL); 426 discovery = hashbin_find(irlmp->cachelog, daddr, NULL);
427 else { 427 else {
428 IRDA_DEBUG(2, "%s(), no daddr\n", __FUNCTION__); 428 IRDA_DEBUG(2, "%s(), no daddr\n", __func__);
429 discovery = (discovery_t *) 429 discovery = (discovery_t *)
430 hashbin_get_first(irlmp->cachelog); 430 hashbin_get_first(irlmp->cachelog);
431 } 431 }
@@ -438,7 +438,7 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
438 } 438 }
439 lap = hashbin_lock_find(irlmp->links, saddr, NULL); 439 lap = hashbin_lock_find(irlmp->links, saddr, NULL);
440 if (lap == NULL) { 440 if (lap == NULL) {
441 IRDA_DEBUG(1, "%s(), Unable to find a usable link!\n", __FUNCTION__); 441 IRDA_DEBUG(1, "%s(), Unable to find a usable link!\n", __func__);
442 ret = -EHOSTUNREACH; 442 ret = -EHOSTUNREACH;
443 goto err; 443 goto err;
444 } 444 }
@@ -453,14 +453,14 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
453 * disconnected yet (waiting for timeout in LAP). 453 * disconnected yet (waiting for timeout in LAP).
454 * Maybe we could give LAP a bit of help in this case. 454 * Maybe we could give LAP a bit of help in this case.
455 */ 455 */
456 IRDA_DEBUG(0, "%s(), sorry, but I'm waiting for LAP to timeout!\n", __FUNCTION__); 456 IRDA_DEBUG(0, "%s(), sorry, but I'm waiting for LAP to timeout!\n", __func__);
457 ret = -EAGAIN; 457 ret = -EAGAIN;
458 goto err; 458 goto err;
459 } 459 }
460 460
461 /* LAP is already connected to a different node, and LAP 461 /* LAP is already connected to a different node, and LAP
462 * can only talk to one node at a time */ 462 * can only talk to one node at a time */
463 IRDA_DEBUG(0, "%s(), sorry, but link is busy!\n", __FUNCTION__); 463 IRDA_DEBUG(0, "%s(), sorry, but link is busy!\n", __func__);
464 ret = -EBUSY; 464 ret = -EBUSY;
465 goto err; 465 goto err;
466 } 466 }
@@ -522,7 +522,7 @@ void irlmp_connect_indication(struct lsap_cb *self, struct sk_buff *skb)
522 IRDA_ASSERT(self->lap != NULL, return;); 522 IRDA_ASSERT(self->lap != NULL, return;);
523 523
524 IRDA_DEBUG(2, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n", 524 IRDA_DEBUG(2, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n",
525 __FUNCTION__, self->slsap_sel, self->dlsap_sel); 525 __func__, self->slsap_sel, self->dlsap_sel);
526 526
527 /* Note : self->lap is set in irlmp_link_data_indication(), 527 /* Note : self->lap is set in irlmp_link_data_indication(),
528 * (case CONNECT_CMD:) because we have no way to set it here. 528 * (case CONNECT_CMD:) because we have no way to set it here.
@@ -563,7 +563,7 @@ int irlmp_connect_response(struct lsap_cb *self, struct sk_buff *userdata)
563 * in the state machine itself. Jean II */ 563 * in the state machine itself. Jean II */
564 564
565 IRDA_DEBUG(2, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n", 565 IRDA_DEBUG(2, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n",
566 __FUNCTION__, self->slsap_sel, self->dlsap_sel); 566 __func__, self->slsap_sel, self->dlsap_sel);
567 567
568 /* Make room for MUX control header (3 bytes) */ 568 /* Make room for MUX control header (3 bytes) */
569 IRDA_ASSERT(skb_headroom(userdata) >= LMP_CONTROL_HEADER, return -1;); 569 IRDA_ASSERT(skb_headroom(userdata) >= LMP_CONTROL_HEADER, return -1;);
@@ -589,7 +589,7 @@ void irlmp_connect_confirm(struct lsap_cb *self, struct sk_buff *skb)
589 int lap_header_size; 589 int lap_header_size;
590 int max_seg_size; 590 int max_seg_size;
591 591
592 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 592 IRDA_DEBUG(3, "%s()\n", __func__);
593 593
594 IRDA_ASSERT(skb != NULL, return;); 594 IRDA_ASSERT(skb != NULL, return;);
595 IRDA_ASSERT(self != NULL, return;); 595 IRDA_ASSERT(self != NULL, return;);
@@ -603,7 +603,7 @@ void irlmp_connect_confirm(struct lsap_cb *self, struct sk_buff *skb)
603 max_header_size = LMP_HEADER + lap_header_size; 603 max_header_size = LMP_HEADER + lap_header_size;
604 604
605 IRDA_DEBUG(2, "%s(), max_header_size=%d\n", 605 IRDA_DEBUG(2, "%s(), max_header_size=%d\n",
606 __FUNCTION__, max_header_size); 606 __func__, max_header_size);
607 607
608 /* Hide LMP_CONTROL_HEADER header from layer above */ 608 /* Hide LMP_CONTROL_HEADER header from layer above */
609 skb_pull(skb, LMP_CONTROL_HEADER); 609 skb_pull(skb, LMP_CONTROL_HEADER);
@@ -629,7 +629,7 @@ struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance)
629 struct lsap_cb *new; 629 struct lsap_cb *new;
630 unsigned long flags; 630 unsigned long flags;
631 631
632 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 632 IRDA_DEBUG(1, "%s()\n", __func__);
633 633
634 spin_lock_irqsave(&irlmp->unconnected_lsaps->hb_spinlock, flags); 634 spin_lock_irqsave(&irlmp->unconnected_lsaps->hb_spinlock, flags);
635 635
@@ -638,7 +638,7 @@ struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance)
638 if ((!hashbin_find(irlmp->unconnected_lsaps, (long) orig, NULL)) || 638 if ((!hashbin_find(irlmp->unconnected_lsaps, (long) orig, NULL)) ||
639 (orig->lap == NULL)) { 639 (orig->lap == NULL)) {
640 IRDA_DEBUG(0, "%s(), invalid LSAP (wrong state)\n", 640 IRDA_DEBUG(0, "%s(), invalid LSAP (wrong state)\n",
641 __FUNCTION__); 641 __func__);
642 spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, 642 spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock,
643 flags); 643 flags);
644 return NULL; 644 return NULL;
@@ -647,7 +647,7 @@ struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance)
647 /* Allocate a new instance */ 647 /* Allocate a new instance */
648 new = kmemdup(orig, sizeof(*new), GFP_ATOMIC); 648 new = kmemdup(orig, sizeof(*new), GFP_ATOMIC);
649 if (!new) { 649 if (!new) {
650 IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __FUNCTION__); 650 IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__);
651 spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, 651 spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock,
652 flags); 652 flags);
653 return NULL; 653 return NULL;
@@ -693,7 +693,7 @@ int irlmp_disconnect_request(struct lsap_cb *self, struct sk_buff *userdata)
693 * and us that might mess up the hashbins below. This fixes it. 693 * and us that might mess up the hashbins below. This fixes it.
694 * Jean II */ 694 * Jean II */
695 if (! test_and_clear_bit(0, &self->connected)) { 695 if (! test_and_clear_bit(0, &self->connected)) {
696 IRDA_DEBUG(0, "%s(), already disconnected!\n", __FUNCTION__); 696 IRDA_DEBUG(0, "%s(), already disconnected!\n", __func__);
697 dev_kfree_skb(userdata); 697 dev_kfree_skb(userdata);
698 return -1; 698 return -1;
699 } 699 }
@@ -747,19 +747,19 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,
747{ 747{
748 struct lsap_cb *lsap; 748 struct lsap_cb *lsap;
749 749
750 IRDA_DEBUG(1, "%s(), reason=%s\n", __FUNCTION__, irlmp_reasons[reason]); 750 IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]);
751 IRDA_ASSERT(self != NULL, return;); 751 IRDA_ASSERT(self != NULL, return;);
752 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); 752 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
753 753
754 IRDA_DEBUG(3, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n", 754 IRDA_DEBUG(3, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n",
755 __FUNCTION__, self->slsap_sel, self->dlsap_sel); 755 __func__, self->slsap_sel, self->dlsap_sel);
756 756
757 /* Already disconnected ? 757 /* Already disconnected ?
758 * There is a race condition between irlmp_disconnect_request() 758 * There is a race condition between irlmp_disconnect_request()
759 * and us that might mess up the hashbins below. This fixes it. 759 * and us that might mess up the hashbins below. This fixes it.
760 * Jean II */ 760 * Jean II */
761 if (! test_and_clear_bit(0, &self->connected)) { 761 if (! test_and_clear_bit(0, &self->connected)) {
762 IRDA_DEBUG(0, "%s(), already disconnected!\n", __FUNCTION__); 762 IRDA_DEBUG(0, "%s(), already disconnected!\n", __func__);
763 return; 763 return;
764 } 764 }
765 765
@@ -792,7 +792,7 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,
792 self->notify.disconnect_indication(self->notify.instance, 792 self->notify.disconnect_indication(self->notify.instance,
793 self, reason, skb); 793 self, reason, skb);
794 } else { 794 } else {
795 IRDA_DEBUG(0, "%s(), no handler\n", __FUNCTION__); 795 IRDA_DEBUG(0, "%s(), no handler\n", __func__);
796 } 796 }
797} 797}
798 798
@@ -845,7 +845,7 @@ void irlmp_do_discovery(int nslots)
845 /* Make sure the value is sane */ 845 /* Make sure the value is sane */
846 if ((nslots != 1) && (nslots != 6) && (nslots != 8) && (nslots != 16)){ 846 if ((nslots != 1) && (nslots != 6) && (nslots != 8) && (nslots != 16)){
847 IRDA_WARNING("%s: invalid value for number of slots!\n", 847 IRDA_WARNING("%s: invalid value for number of slots!\n",
848 __FUNCTION__); 848 __func__);
849 nslots = sysctl_discovery_slots = 8; 849 nslots = sysctl_discovery_slots = 8;
850 } 850 }
851 851
@@ -963,7 +963,7 @@ irlmp_notify_client(irlmp_client_t *client,
963 int number; /* Number of nodes in the log */ 963 int number; /* Number of nodes in the log */
964 int i; 964 int i;
965 965
966 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 966 IRDA_DEBUG(3, "%s()\n", __func__);
967 967
968 /* Check if client wants or not partial/selective log (optimisation) */ 968 /* Check if client wants or not partial/selective log (optimisation) */
969 if (!client->disco_callback) 969 if (!client->disco_callback)
@@ -1014,7 +1014,7 @@ void irlmp_discovery_confirm(hashbin_t *log, DISCOVERY_MODE mode)
1014 irlmp_client_t *client; 1014 irlmp_client_t *client;
1015 irlmp_client_t *client_next; 1015 irlmp_client_t *client_next;
1016 1016
1017 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 1017 IRDA_DEBUG(3, "%s()\n", __func__);
1018 1018
1019 IRDA_ASSERT(log != NULL, return;); 1019 IRDA_ASSERT(log != NULL, return;);
1020 1020
@@ -1049,7 +1049,7 @@ void irlmp_discovery_expiry(discinfo_t *expiries, int number)
1049 irlmp_client_t *client_next; 1049 irlmp_client_t *client_next;
1050 int i; 1050 int i;
1051 1051
1052 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 1052 IRDA_DEBUG(3, "%s()\n", __func__);
1053 1053
1054 IRDA_ASSERT(expiries != NULL, return;); 1054 IRDA_ASSERT(expiries != NULL, return;);
1055 1055
@@ -1082,7 +1082,7 @@ void irlmp_discovery_expiry(discinfo_t *expiries, int number)
1082 */ 1082 */
1083discovery_t *irlmp_get_discovery_response(void) 1083discovery_t *irlmp_get_discovery_response(void)
1084{ 1084{
1085 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 1085 IRDA_DEBUG(4, "%s()\n", __func__);
1086 1086
1087 IRDA_ASSERT(irlmp != NULL, return NULL;); 1087 IRDA_ASSERT(irlmp != NULL, return NULL;);
1088 1088
@@ -1160,7 +1160,7 @@ int irlmp_udata_request(struct lsap_cb *self, struct sk_buff *userdata)
1160{ 1160{
1161 int ret; 1161 int ret;
1162 1162
1163 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 1163 IRDA_DEBUG(4, "%s()\n", __func__);
1164 1164
1165 IRDA_ASSERT(userdata != NULL, return -1;); 1165 IRDA_ASSERT(userdata != NULL, return -1;);
1166 1166
@@ -1184,7 +1184,7 @@ int irlmp_udata_request(struct lsap_cb *self, struct sk_buff *userdata)
1184 */ 1184 */
1185void irlmp_udata_indication(struct lsap_cb *self, struct sk_buff *skb) 1185void irlmp_udata_indication(struct lsap_cb *self, struct sk_buff *skb)
1186{ 1186{
1187 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 1187 IRDA_DEBUG(4, "%s()\n", __func__);
1188 1188
1189 IRDA_ASSERT(self != NULL, return;); 1189 IRDA_ASSERT(self != NULL, return;);
1190 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); 1190 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
@@ -1211,7 +1211,7 @@ int irlmp_connless_data_request(struct lsap_cb *self, struct sk_buff *userdata,
1211 struct sk_buff *clone_skb; 1211 struct sk_buff *clone_skb;
1212 struct lap_cb *lap; 1212 struct lap_cb *lap;
1213 1213
1214 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 1214 IRDA_DEBUG(4, "%s()\n", __func__);
1215 1215
1216 IRDA_ASSERT(userdata != NULL, return -1;); 1216 IRDA_ASSERT(userdata != NULL, return -1;);
1217 1217
@@ -1262,7 +1262,7 @@ int irlmp_connless_data_request(struct lsap_cb *self, struct sk_buff *userdata,
1262#ifdef CONFIG_IRDA_ULTRA 1262#ifdef CONFIG_IRDA_ULTRA
1263void irlmp_connless_data_indication(struct lsap_cb *self, struct sk_buff *skb) 1263void irlmp_connless_data_indication(struct lsap_cb *self, struct sk_buff *skb)
1264{ 1264{
1265 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 1265 IRDA_DEBUG(4, "%s()\n", __func__);
1266 1266
1267 IRDA_ASSERT(self != NULL, return;); 1267 IRDA_ASSERT(self != NULL, return;);
1268 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); 1268 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
@@ -1305,7 +1305,7 @@ void irlmp_status_indication(struct lap_cb *self,
1305 curr->notify.status_indication(curr->notify.instance, 1305 curr->notify.status_indication(curr->notify.instance,
1306 link, lock); 1306 link, lock);
1307 else 1307 else
1308 IRDA_DEBUG(2, "%s(), no handler\n", __FUNCTION__); 1308 IRDA_DEBUG(2, "%s(), no handler\n", __func__);
1309 1309
1310 curr = next; 1310 curr = next;
1311 } 1311 }
@@ -1333,7 +1333,7 @@ void irlmp_flow_indication(struct lap_cb *self, LOCAL_FLOW flow)
1333 /* Get the number of lsap. That's the only safe way to know 1333 /* Get the number of lsap. That's the only safe way to know
1334 * that we have looped around... - Jean II */ 1334 * that we have looped around... - Jean II */
1335 lsap_todo = HASHBIN_GET_SIZE(self->lsaps); 1335 lsap_todo = HASHBIN_GET_SIZE(self->lsaps);
1336 IRDA_DEBUG(4, "%s() : %d lsaps to scan\n", __FUNCTION__, lsap_todo); 1336 IRDA_DEBUG(4, "%s() : %d lsaps to scan\n", __func__, lsap_todo);
1337 1337
1338 /* Poll lsap in order until the queue is full or until we 1338 /* Poll lsap in order until the queue is full or until we
1339 * tried them all. 1339 * tried them all.
@@ -1352,14 +1352,14 @@ void irlmp_flow_indication(struct lap_cb *self, LOCAL_FLOW flow)
1352 /* Uh-oh... Paranoia */ 1352 /* Uh-oh... Paranoia */
1353 if(curr == NULL) 1353 if(curr == NULL)
1354 break; 1354 break;
1355 IRDA_DEBUG(4, "%s() : curr is %p, next was %p and is now %p, still %d to go - queue len = %d\n", __FUNCTION__, curr, next, self->flow_next, lsap_todo, IRLAP_GET_TX_QUEUE_LEN(self->irlap)); 1355 IRDA_DEBUG(4, "%s() : curr is %p, next was %p and is now %p, still %d to go - queue len = %d\n", __func__, curr, next, self->flow_next, lsap_todo, IRLAP_GET_TX_QUEUE_LEN(self->irlap));
1356 1356
1357 /* Inform lsap user that it can send one more packet. */ 1357 /* Inform lsap user that it can send one more packet. */
1358 if (curr->notify.flow_indication != NULL) 1358 if (curr->notify.flow_indication != NULL)
1359 curr->notify.flow_indication(curr->notify.instance, 1359 curr->notify.flow_indication(curr->notify.instance,
1360 curr, flow); 1360 curr, flow);
1361 else 1361 else
1362 IRDA_DEBUG(1, "%s(), no handler\n", __FUNCTION__); 1362 IRDA_DEBUG(1, "%s(), no handler\n", __func__);
1363 } 1363 }
1364} 1364}
1365 1365
@@ -1381,7 +1381,7 @@ __u8 *irlmp_hint_to_service(__u8 *hint)
1381 */ 1381 */
1382 service = kmalloc(16, GFP_ATOMIC); 1382 service = kmalloc(16, GFP_ATOMIC);
1383 if (!service) { 1383 if (!service) {
1384 IRDA_DEBUG(1, "%s(), Unable to kmalloc!\n", __FUNCTION__); 1384 IRDA_DEBUG(1, "%s(), Unable to kmalloc!\n", __func__);
1385 return NULL; 1385 return NULL;
1386 } 1386 }
1387 1387
@@ -1482,12 +1482,12 @@ void *irlmp_register_service(__u16 hints)
1482{ 1482{
1483 irlmp_service_t *service; 1483 irlmp_service_t *service;
1484 1484
1485 IRDA_DEBUG(4, "%s(), hints = %04x\n", __FUNCTION__, hints); 1485 IRDA_DEBUG(4, "%s(), hints = %04x\n", __func__, hints);
1486 1486
1487 /* Make a new registration */ 1487 /* Make a new registration */
1488 service = kmalloc(sizeof(irlmp_service_t), GFP_ATOMIC); 1488 service = kmalloc(sizeof(irlmp_service_t), GFP_ATOMIC);
1489 if (!service) { 1489 if (!service) {
1490 IRDA_DEBUG(1, "%s(), Unable to kmalloc!\n", __FUNCTION__); 1490 IRDA_DEBUG(1, "%s(), Unable to kmalloc!\n", __func__);
1491 return NULL; 1491 return NULL;
1492 } 1492 }
1493 service->hints.word = hints; 1493 service->hints.word = hints;
@@ -1512,7 +1512,7 @@ int irlmp_unregister_service(void *handle)
1512 irlmp_service_t *service; 1512 irlmp_service_t *service;
1513 unsigned long flags; 1513 unsigned long flags;
1514 1514
1515 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 1515 IRDA_DEBUG(4, "%s()\n", __func__);
1516 1516
1517 if (!handle) 1517 if (!handle)
1518 return -1; 1518 return -1;
@@ -1520,7 +1520,7 @@ int irlmp_unregister_service(void *handle)
1520 /* Caller may call with invalid handle (it's legal) - Jean II */ 1520 /* Caller may call with invalid handle (it's legal) - Jean II */
1521 service = hashbin_lock_find(irlmp->services, (long) handle, NULL); 1521 service = hashbin_lock_find(irlmp->services, (long) handle, NULL);
1522 if (!service) { 1522 if (!service) {
1523 IRDA_DEBUG(1, "%s(), Unknown service!\n", __FUNCTION__); 1523 IRDA_DEBUG(1, "%s(), Unknown service!\n", __func__);
1524 return -1; 1524 return -1;
1525 } 1525 }
1526 1526
@@ -1557,13 +1557,13 @@ void *irlmp_register_client(__u16 hint_mask, DISCOVERY_CALLBACK1 disco_clb,
1557{ 1557{
1558 irlmp_client_t *client; 1558 irlmp_client_t *client;
1559 1559
1560 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 1560 IRDA_DEBUG(1, "%s()\n", __func__);
1561 IRDA_ASSERT(irlmp != NULL, return NULL;); 1561 IRDA_ASSERT(irlmp != NULL, return NULL;);
1562 1562
1563 /* Make a new registration */ 1563 /* Make a new registration */
1564 client = kmalloc(sizeof(irlmp_client_t), GFP_ATOMIC); 1564 client = kmalloc(sizeof(irlmp_client_t), GFP_ATOMIC);
1565 if (!client) { 1565 if (!client) {
1566 IRDA_DEBUG( 1, "%s(), Unable to kmalloc!\n", __FUNCTION__); 1566 IRDA_DEBUG( 1, "%s(), Unable to kmalloc!\n", __func__);
1567 return NULL; 1567 return NULL;
1568 } 1568 }
1569 1569
@@ -1599,7 +1599,7 @@ int irlmp_update_client(void *handle, __u16 hint_mask,
1599 1599
1600 client = hashbin_lock_find(irlmp->clients, (long) handle, NULL); 1600 client = hashbin_lock_find(irlmp->clients, (long) handle, NULL);
1601 if (!client) { 1601 if (!client) {
1602 IRDA_DEBUG(1, "%s(), Unknown client!\n", __FUNCTION__); 1602 IRDA_DEBUG(1, "%s(), Unknown client!\n", __func__);
1603 return -1; 1603 return -1;
1604 } 1604 }
1605 1605
@@ -1622,7 +1622,7 @@ int irlmp_unregister_client(void *handle)
1622{ 1622{
1623 struct irlmp_client *client; 1623 struct irlmp_client *client;
1624 1624
1625 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 1625 IRDA_DEBUG(4, "%s()\n", __func__);
1626 1626
1627 if (!handle) 1627 if (!handle)
1628 return -1; 1628 return -1;
@@ -1630,11 +1630,11 @@ int irlmp_unregister_client(void *handle)
1630 /* Caller may call with invalid handle (it's legal) - Jean II */ 1630 /* Caller may call with invalid handle (it's legal) - Jean II */
1631 client = hashbin_lock_find(irlmp->clients, (long) handle, NULL); 1631 client = hashbin_lock_find(irlmp->clients, (long) handle, NULL);
1632 if (!client) { 1632 if (!client) {
1633 IRDA_DEBUG(1, "%s(), Unknown client!\n", __FUNCTION__); 1633 IRDA_DEBUG(1, "%s(), Unknown client!\n", __func__);
1634 return -1; 1634 return -1;
1635 } 1635 }
1636 1636
1637 IRDA_DEBUG(4, "%s(), removing client!\n", __FUNCTION__); 1637 IRDA_DEBUG(4, "%s(), removing client!\n", __func__);
1638 hashbin_remove_this(irlmp->clients, (irda_queue_t *) client); 1638 hashbin_remove_this(irlmp->clients, (irda_queue_t *) client);
1639 kfree(client); 1639 kfree(client);
1640 1640
@@ -1663,7 +1663,7 @@ static int irlmp_slsap_inuse(__u8 slsap_sel)
1663 IRDA_ASSERT(irlmp->magic == LMP_MAGIC, return TRUE;); 1663 IRDA_ASSERT(irlmp->magic == LMP_MAGIC, return TRUE;);
1664 IRDA_ASSERT(slsap_sel != LSAP_ANY, return TRUE;); 1664 IRDA_ASSERT(slsap_sel != LSAP_ANY, return TRUE;);
1665 1665
1666 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 1666 IRDA_DEBUG(4, "%s()\n", __func__);
1667 1667
1668#ifdef CONFIG_IRDA_ULTRA 1668#ifdef CONFIG_IRDA_ULTRA
1669 /* Accept all bindings to the connectionless LSAP */ 1669 /* Accept all bindings to the connectionless LSAP */
@@ -1790,7 +1790,7 @@ static __u8 irlmp_find_free_slsap(void)
1790 /* Make sure we terminate the loop */ 1790 /* Make sure we terminate the loop */
1791 if (wrapped++) { 1791 if (wrapped++) {
1792 IRDA_ERROR("%s: no more free LSAPs !\n", 1792 IRDA_ERROR("%s: no more free LSAPs !\n",
1793 __FUNCTION__); 1793 __func__);
1794 return 0; 1794 return 0;
1795 } 1795 }
1796 } 1796 }
@@ -1805,7 +1805,7 @@ static __u8 irlmp_find_free_slsap(void)
1805 /* Got it ! */ 1805 /* Got it ! */
1806 lsap_sel = irlmp->last_lsap_sel; 1806 lsap_sel = irlmp->last_lsap_sel;
1807 IRDA_DEBUG(4, "%s(), found free lsap_sel=%02x\n", 1807 IRDA_DEBUG(4, "%s(), found free lsap_sel=%02x\n",
1808 __FUNCTION__, lsap_sel); 1808 __func__, lsap_sel);
1809 1809
1810 return lsap_sel; 1810 return lsap_sel;
1811} 1811}
@@ -1823,26 +1823,26 @@ LM_REASON irlmp_convert_lap_reason( LAP_REASON lap_reason)
1823 1823
1824 switch (lap_reason) { 1824 switch (lap_reason) {
1825 case LAP_DISC_INDICATION: /* Received a disconnect request from peer */ 1825 case LAP_DISC_INDICATION: /* Received a disconnect request from peer */
1826 IRDA_DEBUG( 1, "%s(), LAP_DISC_INDICATION\n", __FUNCTION__); 1826 IRDA_DEBUG( 1, "%s(), LAP_DISC_INDICATION\n", __func__);
1827 reason = LM_USER_REQUEST; 1827 reason = LM_USER_REQUEST;
1828 break; 1828 break;
1829 case LAP_NO_RESPONSE: /* To many retransmits without response */ 1829 case LAP_NO_RESPONSE: /* To many retransmits without response */
1830 IRDA_DEBUG( 1, "%s(), LAP_NO_RESPONSE\n", __FUNCTION__); 1830 IRDA_DEBUG( 1, "%s(), LAP_NO_RESPONSE\n", __func__);
1831 reason = LM_LAP_DISCONNECT; 1831 reason = LM_LAP_DISCONNECT;
1832 break; 1832 break;
1833 case LAP_RESET_INDICATION: 1833 case LAP_RESET_INDICATION:
1834 IRDA_DEBUG( 1, "%s(), LAP_RESET_INDICATION\n", __FUNCTION__); 1834 IRDA_DEBUG( 1, "%s(), LAP_RESET_INDICATION\n", __func__);
1835 reason = LM_LAP_RESET; 1835 reason = LM_LAP_RESET;
1836 break; 1836 break;
1837 case LAP_FOUND_NONE: 1837 case LAP_FOUND_NONE:
1838 case LAP_MEDIA_BUSY: 1838 case LAP_MEDIA_BUSY:
1839 case LAP_PRIMARY_CONFLICT: 1839 case LAP_PRIMARY_CONFLICT:
1840 IRDA_DEBUG(1, "%s(), LAP_FOUND_NONE, LAP_MEDIA_BUSY or LAP_PRIMARY_CONFLICT\n", __FUNCTION__); 1840 IRDA_DEBUG(1, "%s(), LAP_FOUND_NONE, LAP_MEDIA_BUSY or LAP_PRIMARY_CONFLICT\n", __func__);
1841 reason = LM_CONNECT_FAILURE; 1841 reason = LM_CONNECT_FAILURE;
1842 break; 1842 break;
1843 default: 1843 default:
1844 IRDA_DEBUG(1, "%s(), Unknow IrLAP disconnect reason %d!\n", 1844 IRDA_DEBUG(1, "%s(), Unknow IrLAP disconnect reason %d!\n",
1845 __FUNCTION__, lap_reason); 1845 __func__, lap_reason);
1846 reason = LM_LAP_DISCONNECT; 1846 reason = LM_LAP_DISCONNECT;
1847 break; 1847 break;
1848 } 1848 }
diff --git a/net/irda/irlmp_event.c b/net/irda/irlmp_event.c
index 150cd3f1129a..78cce0cb073f 100644
--- a/net/irda/irlmp_event.c
+++ b/net/irda/irlmp_event.c
@@ -120,7 +120,7 @@ static inline void irlmp_next_lap_state(struct lap_cb *self,
120 IRLMP_STATE state) 120 IRLMP_STATE state)
121{ 121{
122 /* 122 /*
123 IRDA_DEBUG(4, "%s(), LMP LAP = %s\n", __FUNCTION__, irlmp_state[state]); 123 IRDA_DEBUG(4, "%s(), LMP LAP = %s\n", __func__, irlmp_state[state]);
124 */ 124 */
125 self->lap_state = state; 125 self->lap_state = state;
126} 126}
@@ -130,7 +130,7 @@ static inline void irlmp_next_lsap_state(struct lsap_cb *self,
130{ 130{
131 /* 131 /*
132 IRDA_ASSERT(self != NULL, return;); 132 IRDA_ASSERT(self != NULL, return;);
133 IRDA_DEBUG(4, "%s(), LMP LSAP = %s\n", __FUNCTION__, irlsap_state[state]); 133 IRDA_DEBUG(4, "%s(), LMP LSAP = %s\n", __func__, irlsap_state[state]);
134 */ 134 */
135 self->lsap_state = state; 135 self->lsap_state = state;
136} 136}
@@ -143,7 +143,7 @@ int irlmp_do_lsap_event(struct lsap_cb *self, IRLMP_EVENT event,
143 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); 143 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
144 144
145 IRDA_DEBUG(4, "%s(), EVENT = %s, STATE = %s\n", 145 IRDA_DEBUG(4, "%s(), EVENT = %s, STATE = %s\n",
146 __FUNCTION__, irlmp_event[event], irlsap_state[ self->lsap_state]); 146 __func__, irlmp_event[event], irlsap_state[ self->lsap_state]);
147 147
148 return (*lsap_state[self->lsap_state]) (self, event, skb); 148 return (*lsap_state[self->lsap_state]) (self, event, skb);
149} 149}
@@ -160,7 +160,7 @@ void irlmp_do_lap_event(struct lap_cb *self, IRLMP_EVENT event,
160 IRDA_ASSERT(self != NULL, return;); 160 IRDA_ASSERT(self != NULL, return;);
161 IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); 161 IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
162 162
163 IRDA_DEBUG(4, "%s(), EVENT = %s, STATE = %s\n", __FUNCTION__, 163 IRDA_DEBUG(4, "%s(), EVENT = %s, STATE = %s\n", __func__,
164 irlmp_event[event], 164 irlmp_event[event],
165 irlmp_state[self->lap_state]); 165 irlmp_state[self->lap_state]);
166 166
@@ -169,7 +169,7 @@ void irlmp_do_lap_event(struct lap_cb *self, IRLMP_EVENT event,
169 169
170void irlmp_discovery_timer_expired(void *data) 170void irlmp_discovery_timer_expired(void *data)
171{ 171{
172 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 172 IRDA_DEBUG(4, "%s()\n", __func__);
173 173
174 /* We always cleanup the log (active & passive discovery) */ 174 /* We always cleanup the log (active & passive discovery) */
175 irlmp_do_expiry(); 175 irlmp_do_expiry();
@@ -184,7 +184,7 @@ void irlmp_watchdog_timer_expired(void *data)
184{ 184{
185 struct lsap_cb *self = (struct lsap_cb *) data; 185 struct lsap_cb *self = (struct lsap_cb *) data;
186 186
187 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 187 IRDA_DEBUG(2, "%s()\n", __func__);
188 188
189 IRDA_ASSERT(self != NULL, return;); 189 IRDA_ASSERT(self != NULL, return;);
190 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); 190 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
@@ -196,7 +196,7 @@ void irlmp_idle_timer_expired(void *data)
196{ 196{
197 struct lap_cb *self = (struct lap_cb *) data; 197 struct lap_cb *self = (struct lap_cb *) data;
198 198
199 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 199 IRDA_DEBUG(2, "%s()\n", __func__);
200 200
201 IRDA_ASSERT(self != NULL, return;); 201 IRDA_ASSERT(self != NULL, return;);
202 IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); 202 IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
@@ -256,7 +256,7 @@ irlmp_do_all_lsap_event(hashbin_t * lsap_hashbin,
256static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event, 256static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event,
257 struct sk_buff *skb) 257 struct sk_buff *skb)
258{ 258{
259 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 259 IRDA_DEBUG(4, "%s()\n", __func__);
260 IRDA_ASSERT(self->irlap != NULL, return;); 260 IRDA_ASSERT(self->irlap != NULL, return;);
261 261
262 switch (event) { 262 switch (event) {
@@ -276,7 +276,7 @@ static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event,
276 irlap_connect_response(self->irlap, skb); 276 irlap_connect_response(self->irlap, skb);
277 break; 277 break;
278 case LM_LAP_CONNECT_REQUEST: 278 case LM_LAP_CONNECT_REQUEST:
279 IRDA_DEBUG(4, "%s() LS_CONNECT_REQUEST\n", __FUNCTION__); 279 IRDA_DEBUG(4, "%s() LS_CONNECT_REQUEST\n", __func__);
280 280
281 irlmp_next_lap_state(self, LAP_U_CONNECT); 281 irlmp_next_lap_state(self, LAP_U_CONNECT);
282 282
@@ -285,13 +285,13 @@ static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event,
285 break; 285 break;
286 case LM_LAP_DISCONNECT_INDICATION: 286 case LM_LAP_DISCONNECT_INDICATION:
287 IRDA_DEBUG(4, "%s(), Error LM_LAP_DISCONNECT_INDICATION\n", 287 IRDA_DEBUG(4, "%s(), Error LM_LAP_DISCONNECT_INDICATION\n",
288 __FUNCTION__); 288 __func__);
289 289
290 irlmp_next_lap_state(self, LAP_STANDBY); 290 irlmp_next_lap_state(self, LAP_STANDBY);
291 break; 291 break;
292 default: 292 default:
293 IRDA_DEBUG(0, "%s(), Unknown event %s\n", 293 IRDA_DEBUG(0, "%s(), Unknown event %s\n",
294 __FUNCTION__, irlmp_event[event]); 294 __func__, irlmp_event[event]);
295 break; 295 break;
296 } 296 }
297} 297}
@@ -306,7 +306,7 @@ static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event,
306static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, 306static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event,
307 struct sk_buff *skb) 307 struct sk_buff *skb)
308{ 308{
309 IRDA_DEBUG(2, "%s(), event=%s\n", __FUNCTION__, irlmp_event[event]); 309 IRDA_DEBUG(2, "%s(), event=%s\n", __func__, irlmp_event[event]);
310 310
311 switch (event) { 311 switch (event) {
312 case LM_LAP_CONNECT_INDICATION: 312 case LM_LAP_CONNECT_INDICATION:
@@ -326,7 +326,7 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event,
326 * the lsaps may already have gone. This avoid getting stuck 326 * the lsaps may already have gone. This avoid getting stuck
327 * forever in LAP_ACTIVE state - Jean II */ 327 * forever in LAP_ACTIVE state - Jean II */
328 if (HASHBIN_GET_SIZE(self->lsaps) == 0) { 328 if (HASHBIN_GET_SIZE(self->lsaps) == 0) {
329 IRDA_DEBUG(0, "%s() NO LSAPs !\n", __FUNCTION__); 329 IRDA_DEBUG(0, "%s() NO LSAPs !\n", __func__);
330 irlmp_start_idle_timer(self, LM_IDLE_TIMEOUT); 330 irlmp_start_idle_timer(self, LM_IDLE_TIMEOUT);
331 } 331 }
332 break; 332 break;
@@ -344,12 +344,12 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event,
344 * the lsaps may already have gone. This avoid getting stuck 344 * the lsaps may already have gone. This avoid getting stuck
345 * forever in LAP_ACTIVE state - Jean II */ 345 * forever in LAP_ACTIVE state - Jean II */
346 if (HASHBIN_GET_SIZE(self->lsaps) == 0) { 346 if (HASHBIN_GET_SIZE(self->lsaps) == 0) {
347 IRDA_DEBUG(0, "%s() NO LSAPs !\n", __FUNCTION__); 347 IRDA_DEBUG(0, "%s() NO LSAPs !\n", __func__);
348 irlmp_start_idle_timer(self, LM_IDLE_TIMEOUT); 348 irlmp_start_idle_timer(self, LM_IDLE_TIMEOUT);
349 } 349 }
350 break; 350 break;
351 case LM_LAP_DISCONNECT_INDICATION: 351 case LM_LAP_DISCONNECT_INDICATION:
352 IRDA_DEBUG(4, "%s(), LM_LAP_DISCONNECT_INDICATION\n", __FUNCTION__); 352 IRDA_DEBUG(4, "%s(), LM_LAP_DISCONNECT_INDICATION\n", __func__);
353 irlmp_next_lap_state(self, LAP_STANDBY); 353 irlmp_next_lap_state(self, LAP_STANDBY);
354 354
355 /* Send disconnect event to all LSAPs using this link */ 355 /* Send disconnect event to all LSAPs using this link */
@@ -357,7 +357,7 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event,
357 LM_LAP_DISCONNECT_INDICATION); 357 LM_LAP_DISCONNECT_INDICATION);
358 break; 358 break;
359 case LM_LAP_DISCONNECT_REQUEST: 359 case LM_LAP_DISCONNECT_REQUEST:
360 IRDA_DEBUG(4, "%s(), LM_LAP_DISCONNECT_REQUEST\n", __FUNCTION__); 360 IRDA_DEBUG(4, "%s(), LM_LAP_DISCONNECT_REQUEST\n", __func__);
361 361
362 /* One of the LSAP did timeout or was closed, if it was 362 /* One of the LSAP did timeout or was closed, if it was
363 * the last one, try to get out of here - Jean II */ 363 * the last one, try to get out of here - Jean II */
@@ -367,7 +367,7 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event,
367 break; 367 break;
368 default: 368 default:
369 IRDA_DEBUG(0, "%s(), Unknown event %s\n", 369 IRDA_DEBUG(0, "%s(), Unknown event %s\n",
370 __FUNCTION__, irlmp_event[event]); 370 __func__, irlmp_event[event]);
371 break; 371 break;
372 } 372 }
373} 373}
@@ -381,11 +381,11 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event,
381static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event, 381static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event,
382 struct sk_buff *skb) 382 struct sk_buff *skb)
383{ 383{
384 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 384 IRDA_DEBUG(4, "%s()\n", __func__);
385 385
386 switch (event) { 386 switch (event) {
387 case LM_LAP_CONNECT_REQUEST: 387 case LM_LAP_CONNECT_REQUEST:
388 IRDA_DEBUG(4, "%s(), LS_CONNECT_REQUEST\n", __FUNCTION__); 388 IRDA_DEBUG(4, "%s(), LS_CONNECT_REQUEST\n", __func__);
389 389
390 /* 390 /*
391 * IrLAP may have a pending disconnect. We tried to close 391 * IrLAP may have a pending disconnect. We tried to close
@@ -468,7 +468,7 @@ static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event,
468 break; 468 break;
469 default: 469 default:
470 IRDA_DEBUG(0, "%s(), Unknown event %s\n", 470 IRDA_DEBUG(0, "%s(), Unknown event %s\n",
471 __FUNCTION__, irlmp_event[event]); 471 __func__, irlmp_event[event]);
472 break; 472 break;
473 } 473 }
474} 474}
@@ -490,7 +490,7 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,
490{ 490{
491 int ret = 0; 491 int ret = 0;
492 492
493 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 493 IRDA_DEBUG(4, "%s()\n", __func__);
494 494
495 IRDA_ASSERT(self != NULL, return -1;); 495 IRDA_ASSERT(self != NULL, return -1;);
496 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); 496 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
@@ -505,11 +505,11 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,
505 break; 505 break;
506#endif /* CONFIG_IRDA_ULTRA */ 506#endif /* CONFIG_IRDA_ULTRA */
507 case LM_CONNECT_REQUEST: 507 case LM_CONNECT_REQUEST:
508 IRDA_DEBUG(4, "%s(), LM_CONNECT_REQUEST\n", __FUNCTION__); 508 IRDA_DEBUG(4, "%s(), LM_CONNECT_REQUEST\n", __func__);
509 509
510 if (self->conn_skb) { 510 if (self->conn_skb) {
511 IRDA_WARNING("%s: busy with another request!\n", 511 IRDA_WARNING("%s: busy with another request!\n",
512 __FUNCTION__); 512 __func__);
513 return -EBUSY; 513 return -EBUSY;
514 } 514 }
515 /* Don't forget to refcount it (see irlmp_connect_request()) */ 515 /* Don't forget to refcount it (see irlmp_connect_request()) */
@@ -526,7 +526,7 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,
526 case LM_CONNECT_INDICATION: 526 case LM_CONNECT_INDICATION:
527 if (self->conn_skb) { 527 if (self->conn_skb) {
528 IRDA_WARNING("%s: busy with another request!\n", 528 IRDA_WARNING("%s: busy with another request!\n",
529 __FUNCTION__); 529 __func__);
530 return -EBUSY; 530 return -EBUSY;
531 } 531 }
532 /* Don't forget to refcount it (see irlap_driver_rcv()) */ 532 /* Don't forget to refcount it (see irlap_driver_rcv()) */
@@ -552,7 +552,7 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,
552 break; 552 break;
553 default: 553 default:
554 IRDA_DEBUG(1, "%s(), Unknown event %s on LSAP %#02x\n", 554 IRDA_DEBUG(1, "%s(), Unknown event %s on LSAP %#02x\n",
555 __FUNCTION__, irlmp_event[event], self->slsap_sel); 555 __func__, irlmp_event[event], self->slsap_sel);
556 break; 556 break;
557 } 557 }
558 return ret; 558 return ret;
@@ -570,7 +570,7 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event,
570 struct lsap_cb *lsap; 570 struct lsap_cb *lsap;
571 int ret = 0; 571 int ret = 0;
572 572
573 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 573 IRDA_DEBUG(4, "%s()\n", __func__);
574 574
575 IRDA_ASSERT(self != NULL, return -1;); 575 IRDA_ASSERT(self != NULL, return -1;);
576 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); 576 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
@@ -603,7 +603,7 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event,
603 case LM_WATCHDOG_TIMEOUT: 603 case LM_WATCHDOG_TIMEOUT:
604 /* May happen, who knows... 604 /* May happen, who knows...
605 * Jean II */ 605 * Jean II */
606 IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __FUNCTION__); 606 IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __func__);
607 607
608 /* Disconnect, get out... - Jean II */ 608 /* Disconnect, get out... - Jean II */
609 self->lap = NULL; 609 self->lap = NULL;
@@ -614,7 +614,7 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event,
614 /* LM_LAP_DISCONNECT_INDICATION : Should never happen, we 614 /* LM_LAP_DISCONNECT_INDICATION : Should never happen, we
615 * are *not* yet bound to the IrLAP link. Jean II */ 615 * are *not* yet bound to the IrLAP link. Jean II */
616 IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", 616 IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
617 __FUNCTION__, irlmp_event[event], self->slsap_sel); 617 __func__, irlmp_event[event], self->slsap_sel);
618 break; 618 break;
619 } 619 }
620 return ret; 620 return ret;
@@ -632,7 +632,7 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event,
632 struct sk_buff *tx_skb; 632 struct sk_buff *tx_skb;
633 int ret = 0; 633 int ret = 0;
634 634
635 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 635 IRDA_DEBUG(4, "%s()\n", __func__);
636 636
637 IRDA_ASSERT(self != NULL, return -1;); 637 IRDA_ASSERT(self != NULL, return -1;);
638 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); 638 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
@@ -643,16 +643,16 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event,
643 break; 643 break;
644 case LM_CONNECT_RESPONSE: 644 case LM_CONNECT_RESPONSE:
645 IRDA_DEBUG(0, "%s(), LM_CONNECT_RESPONSE, " 645 IRDA_DEBUG(0, "%s(), LM_CONNECT_RESPONSE, "
646 "no indication issued yet\n", __FUNCTION__); 646 "no indication issued yet\n", __func__);
647 /* Keep state */ 647 /* Keep state */
648 break; 648 break;
649 case LM_DISCONNECT_REQUEST: 649 case LM_DISCONNECT_REQUEST:
650 IRDA_DEBUG(0, "%s(), LM_DISCONNECT_REQUEST, " 650 IRDA_DEBUG(0, "%s(), LM_DISCONNECT_REQUEST, "
651 "not yet bound to IrLAP connection\n", __FUNCTION__); 651 "not yet bound to IrLAP connection\n", __func__);
652 /* Keep state */ 652 /* Keep state */
653 break; 653 break;
654 case LM_LAP_CONNECT_CONFIRM: 654 case LM_LAP_CONNECT_CONFIRM:
655 IRDA_DEBUG(4, "%s(), LS_CONNECT_CONFIRM\n", __FUNCTION__); 655 IRDA_DEBUG(4, "%s(), LS_CONNECT_CONFIRM\n", __func__);
656 irlmp_next_lsap_state(self, LSAP_CONNECT); 656 irlmp_next_lsap_state(self, LSAP_CONNECT);
657 657
658 tx_skb = self->conn_skb; 658 tx_skb = self->conn_skb;
@@ -666,7 +666,7 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event,
666 /* Will happen in some rare cases because of a race condition. 666 /* Will happen in some rare cases because of a race condition.
667 * Just make sure we don't stay there forever... 667 * Just make sure we don't stay there forever...
668 * Jean II */ 668 * Jean II */
669 IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __FUNCTION__); 669 IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __func__);
670 670
671 /* Go back to disconnected mode, keep the socket waiting */ 671 /* Go back to disconnected mode, keep the socket waiting */
672 self->lap = NULL; 672 self->lap = NULL;
@@ -680,7 +680,7 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event,
680 /* LM_LAP_DISCONNECT_INDICATION : Should never happen, we 680 /* LM_LAP_DISCONNECT_INDICATION : Should never happen, we
681 * are *not* yet bound to the IrLAP link. Jean II */ 681 * are *not* yet bound to the IrLAP link. Jean II */
682 IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", 682 IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
683 __FUNCTION__, irlmp_event[event], self->slsap_sel); 683 __func__, irlmp_event[event], self->slsap_sel);
684 break; 684 break;
685 } 685 }
686 return ret; 686 return ret;
@@ -698,7 +698,7 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event,
698 LM_REASON reason; 698 LM_REASON reason;
699 int ret = 0; 699 int ret = 0;
700 700
701 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 701 IRDA_DEBUG(4, "%s()\n", __func__);
702 702
703 IRDA_ASSERT(self != NULL, return -1;); 703 IRDA_ASSERT(self != NULL, return -1;);
704 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); 704 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
@@ -722,12 +722,12 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event,
722 break; 722 break;
723 case LM_CONNECT_REQUEST: 723 case LM_CONNECT_REQUEST:
724 IRDA_DEBUG(0, "%s(), LM_CONNECT_REQUEST, " 724 IRDA_DEBUG(0, "%s(), LM_CONNECT_REQUEST, "
725 "error, LSAP already connected\n", __FUNCTION__); 725 "error, LSAP already connected\n", __func__);
726 /* Keep state */ 726 /* Keep state */
727 break; 727 break;
728 case LM_CONNECT_RESPONSE: 728 case LM_CONNECT_RESPONSE:
729 IRDA_DEBUG(0, "%s(), LM_CONNECT_RESPONSE, " 729 IRDA_DEBUG(0, "%s(), LM_CONNECT_RESPONSE, "
730 "error, LSAP already connected\n", __FUNCTION__); 730 "error, LSAP already connected\n", __func__);
731 /* Keep state */ 731 /* Keep state */
732 break; 732 break;
733 case LM_DISCONNECT_REQUEST: 733 case LM_DISCONNECT_REQUEST:
@@ -740,7 +740,7 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event,
740 /* Try to close the LAP connection if its still there */ 740 /* Try to close the LAP connection if its still there */
741 if (self->lap) { 741 if (self->lap) {
742 IRDA_DEBUG(4, "%s(), trying to close IrLAP\n", 742 IRDA_DEBUG(4, "%s(), trying to close IrLAP\n",
743 __FUNCTION__); 743 __func__);
744 irlmp_do_lap_event(self->lap, 744 irlmp_do_lap_event(self->lap,
745 LM_LAP_DISCONNECT_REQUEST, 745 LM_LAP_DISCONNECT_REQUEST,
746 NULL); 746 NULL);
@@ -764,14 +764,14 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event,
764 reason = skb->data[3]; 764 reason = skb->data[3];
765 765
766 /* Try to close the LAP connection */ 766 /* Try to close the LAP connection */
767 IRDA_DEBUG(4, "%s(), trying to close IrLAP\n", __FUNCTION__); 767 IRDA_DEBUG(4, "%s(), trying to close IrLAP\n", __func__);
768 irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); 768 irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL);
769 769
770 irlmp_disconnect_indication(self, reason, skb); 770 irlmp_disconnect_indication(self, reason, skb);
771 break; 771 break;
772 default: 772 default:
773 IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", 773 IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
774 __FUNCTION__, irlmp_event[event], self->slsap_sel); 774 __func__, irlmp_event[event], self->slsap_sel);
775 break; 775 break;
776 } 776 }
777 return ret; 777 return ret;
@@ -793,7 +793,7 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event,
793 IRDA_ASSERT(self != NULL, return -1;); 793 IRDA_ASSERT(self != NULL, return -1;);
794 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); 794 IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
795 795
796 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 796 IRDA_DEBUG(4, "%s()\n", __func__);
797 797
798 switch (event) { 798 switch (event) {
799 case LM_CONNECT_CONFIRM: 799 case LM_CONNECT_CONFIRM:
@@ -814,7 +814,7 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event,
814 reason = skb->data[3]; 814 reason = skb->data[3];
815 815
816 /* Try to close the LAP connection */ 816 /* Try to close the LAP connection */
817 IRDA_DEBUG(4, "%s(), trying to close IrLAP\n", __FUNCTION__); 817 IRDA_DEBUG(4, "%s(), trying to close IrLAP\n", __func__);
818 irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); 818 irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL);
819 819
820 irlmp_disconnect_indication(self, reason, skb); 820 irlmp_disconnect_indication(self, reason, skb);
@@ -832,7 +832,7 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event,
832 irlmp_disconnect_indication(self, reason, skb); 832 irlmp_disconnect_indication(self, reason, skb);
833 break; 833 break;
834 case LM_WATCHDOG_TIMEOUT: 834 case LM_WATCHDOG_TIMEOUT:
835 IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __FUNCTION__); 835 IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __func__);
836 836
837 IRDA_ASSERT(self->lap != NULL, return -1;); 837 IRDA_ASSERT(self->lap != NULL, return -1;);
838 irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); 838 irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL);
@@ -842,7 +842,7 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event,
842 break; 842 break;
843 default: 843 default:
844 IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", 844 IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
845 __FUNCTION__, irlmp_event[event], self->slsap_sel); 845 __func__, irlmp_event[event], self->slsap_sel);
846 break; 846 break;
847 } 847 }
848 return ret; 848 return ret;
@@ -863,7 +863,7 @@ static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event,
863 LM_REASON reason; 863 LM_REASON reason;
864 int ret = 0; 864 int ret = 0;
865 865
866 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 866 IRDA_DEBUG(4, "%s()\n", __func__);
867 867
868 IRDA_ASSERT(self != NULL, return -1;); 868 IRDA_ASSERT(self != NULL, return -1;);
869 IRDA_ASSERT(irlmp != NULL, return -1;); 869 IRDA_ASSERT(irlmp != NULL, return -1;);
@@ -883,7 +883,7 @@ static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event,
883 irlmp_next_lsap_state(self, LSAP_SETUP); 883 irlmp_next_lsap_state(self, LSAP_SETUP);
884 break; 884 break;
885 case LM_WATCHDOG_TIMEOUT: 885 case LM_WATCHDOG_TIMEOUT:
886 IRDA_DEBUG(0, "%s() : WATCHDOG_TIMEOUT !\n", __FUNCTION__); 886 IRDA_DEBUG(0, "%s() : WATCHDOG_TIMEOUT !\n", __func__);
887 887
888 IRDA_ASSERT(self->lap != NULL, return -1;); 888 IRDA_ASSERT(self->lap != NULL, return -1;);
889 irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); 889 irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL);
@@ -902,7 +902,7 @@ static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event,
902 break; 902 break;
903 default: 903 default:
904 IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", 904 IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
905 __FUNCTION__, irlmp_event[event], self->slsap_sel); 905 __func__, irlmp_event[event], self->slsap_sel);
906 break; 906 break;
907 } 907 }
908 return ret; 908 return ret;
diff --git a/net/irda/irlmp_frame.c b/net/irda/irlmp_frame.c
index 0a79d9aeb08c..3750884094da 100644
--- a/net/irda/irlmp_frame.c
+++ b/net/irda/irlmp_frame.c
@@ -44,7 +44,7 @@ inline void irlmp_send_data_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
44 skb->data[1] = slsap; 44 skb->data[1] = slsap;
45 45
46 if (expedited) { 46 if (expedited) {
47 IRDA_DEBUG(4, "%s(), sending expedited data\n", __FUNCTION__); 47 IRDA_DEBUG(4, "%s(), sending expedited data\n", __func__);
48 irlap_data_request(self->irlap, skb, TRUE); 48 irlap_data_request(self->irlap, skb, TRUE);
49 } else 49 } else
50 irlap_data_request(self->irlap, skb, FALSE); 50 irlap_data_request(self->irlap, skb, FALSE);
@@ -60,7 +60,7 @@ void irlmp_send_lcf_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
60{ 60{
61 __u8 *frame; 61 __u8 *frame;
62 62
63 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 63 IRDA_DEBUG(2, "%s()\n", __func__);
64 64
65 IRDA_ASSERT(self != NULL, return;); 65 IRDA_ASSERT(self != NULL, return;);
66 IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); 66 IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
@@ -95,7 +95,7 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
95 __u8 dlsap_sel; /* Destination LSAP address */ 95 __u8 dlsap_sel; /* Destination LSAP address */
96 __u8 *fp; 96 __u8 *fp;
97 97
98 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 98 IRDA_DEBUG(4, "%s()\n", __func__);
99 99
100 IRDA_ASSERT(self != NULL, return;); 100 IRDA_ASSERT(self != NULL, return;);
101 IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); 101 IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
@@ -117,7 +117,7 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
117 if ((fp[0] & CONTROL_BIT) && (fp[2] == CONNECT_CMD)) { 117 if ((fp[0] & CONTROL_BIT) && (fp[2] == CONNECT_CMD)) {
118 IRDA_DEBUG(3, "%s(), incoming connection, " 118 IRDA_DEBUG(3, "%s(), incoming connection, "
119 "source LSAP=%d, dest LSAP=%d\n", 119 "source LSAP=%d, dest LSAP=%d\n",
120 __FUNCTION__, slsap_sel, dlsap_sel); 120 __func__, slsap_sel, dlsap_sel);
121 121
122 /* Try to find LSAP among the unconnected LSAPs */ 122 /* Try to find LSAP among the unconnected LSAPs */
123 lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, CONNECT_CMD, 123 lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, CONNECT_CMD,
@@ -125,7 +125,7 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
125 125
126 /* Maybe LSAP was already connected, so try one more time */ 126 /* Maybe LSAP was already connected, so try one more time */
127 if (!lsap) { 127 if (!lsap) {
128 IRDA_DEBUG(1, "%s(), incoming connection for LSAP already connected\n", __FUNCTION__); 128 IRDA_DEBUG(1, "%s(), incoming connection for LSAP already connected\n", __func__);
129 lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, 0, 129 lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, 0,
130 self->lsaps); 130 self->lsaps);
131 } 131 }
@@ -136,12 +136,12 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
136 if (lsap == NULL) { 136 if (lsap == NULL) {
137 IRDA_DEBUG(2, "IrLMP, Sorry, no LSAP for received frame!\n"); 137 IRDA_DEBUG(2, "IrLMP, Sorry, no LSAP for received frame!\n");
138 IRDA_DEBUG(2, "%s(), slsap_sel = %02x, dlsap_sel = %02x\n", 138 IRDA_DEBUG(2, "%s(), slsap_sel = %02x, dlsap_sel = %02x\n",
139 __FUNCTION__, slsap_sel, dlsap_sel); 139 __func__, slsap_sel, dlsap_sel);
140 if (fp[0] & CONTROL_BIT) { 140 if (fp[0] & CONTROL_BIT) {
141 IRDA_DEBUG(2, "%s(), received control frame %02x\n", 141 IRDA_DEBUG(2, "%s(), received control frame %02x\n",
142 __FUNCTION__, fp[2]); 142 __func__, fp[2]);
143 } else { 143 } else {
144 IRDA_DEBUG(2, "%s(), received data frame\n", __FUNCTION__); 144 IRDA_DEBUG(2, "%s(), received data frame\n", __func__);
145 } 145 }
146 return; 146 return;
147 } 147 }
@@ -160,7 +160,7 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
160 break; 160 break;
161 case DISCONNECT: 161 case DISCONNECT:
162 IRDA_DEBUG(4, "%s(), Disconnect indication!\n", 162 IRDA_DEBUG(4, "%s(), Disconnect indication!\n",
163 __FUNCTION__); 163 __func__);
164 irlmp_do_lsap_event(lsap, LM_DISCONNECT_INDICATION, 164 irlmp_do_lsap_event(lsap, LM_DISCONNECT_INDICATION,
165 skb); 165 skb);
166 break; 166 break;
@@ -172,7 +172,7 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
172 break; 172 break;
173 default: 173 default:
174 IRDA_DEBUG(0, "%s(), Unknown control frame %02x\n", 174 IRDA_DEBUG(0, "%s(), Unknown control frame %02x\n",
175 __FUNCTION__, fp[2]); 175 __func__, fp[2]);
176 break; 176 break;
177 } 177 }
178 } else if (unreliable) { 178 } else if (unreliable) {
@@ -206,7 +206,7 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
206 __u8 *fp; 206 __u8 *fp;
207 unsigned long flags; 207 unsigned long flags;
208 208
209 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 209 IRDA_DEBUG(4, "%s()\n", __func__);
210 210
211 IRDA_ASSERT(self != NULL, return;); 211 IRDA_ASSERT(self != NULL, return;);
212 IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); 212 IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
@@ -224,13 +224,13 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
224 224
225 if (pid & 0x80) { 225 if (pid & 0x80) {
226 IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", 226 IRDA_DEBUG(0, "%s(), extension in PID not supp!\n",
227 __FUNCTION__); 227 __func__);
228 return; 228 return;
229 } 229 }
230 230
231 /* Check if frame is addressed to the connectionless LSAP */ 231 /* Check if frame is addressed to the connectionless LSAP */
232 if ((slsap_sel != LSAP_CONNLESS) || (dlsap_sel != LSAP_CONNLESS)) { 232 if ((slsap_sel != LSAP_CONNLESS) || (dlsap_sel != LSAP_CONNLESS)) {
233 IRDA_DEBUG(0, "%s(), dropping frame!\n", __FUNCTION__); 233 IRDA_DEBUG(0, "%s(), dropping frame!\n", __func__);
234 return; 234 return;
235 } 235 }
236 236
@@ -254,7 +254,7 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
254 if (lsap) 254 if (lsap)
255 irlmp_connless_data_indication(lsap, skb); 255 irlmp_connless_data_indication(lsap, skb);
256 else { 256 else {
257 IRDA_DEBUG(0, "%s(), found no matching LSAP!\n", __FUNCTION__); 257 IRDA_DEBUG(0, "%s(), found no matching LSAP!\n", __func__);
258 } 258 }
259} 259}
260#endif /* CONFIG_IRDA_ULTRA */ 260#endif /* CONFIG_IRDA_ULTRA */
@@ -270,7 +270,7 @@ void irlmp_link_disconnect_indication(struct lap_cb *lap,
270 LAP_REASON reason, 270 LAP_REASON reason,
271 struct sk_buff *skb) 271 struct sk_buff *skb)
272{ 272{
273 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 273 IRDA_DEBUG(2, "%s()\n", __func__);
274 274
275 IRDA_ASSERT(lap != NULL, return;); 275 IRDA_ASSERT(lap != NULL, return;);
276 IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, return;); 276 IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, return;);
@@ -296,7 +296,7 @@ void irlmp_link_connect_indication(struct lap_cb *self, __u32 saddr,
296 __u32 daddr, struct qos_info *qos, 296 __u32 daddr, struct qos_info *qos,
297 struct sk_buff *skb) 297 struct sk_buff *skb)
298{ 298{
299 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 299 IRDA_DEBUG(4, "%s()\n", __func__);
300 300
301 /* Copy QoS settings for this session */ 301 /* Copy QoS settings for this session */
302 self->qos = qos; 302 self->qos = qos;
@@ -317,7 +317,7 @@ void irlmp_link_connect_indication(struct lap_cb *self, __u32 saddr,
317void irlmp_link_connect_confirm(struct lap_cb *self, struct qos_info *qos, 317void irlmp_link_connect_confirm(struct lap_cb *self, struct qos_info *qos,
318 struct sk_buff *skb) 318 struct sk_buff *skb)
319{ 319{
320 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 320 IRDA_DEBUG(4, "%s()\n", __func__);
321 321
322 IRDA_ASSERT(self != NULL, return;); 322 IRDA_ASSERT(self != NULL, return;);
323 IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); 323 IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
@@ -383,7 +383,7 @@ void irlmp_link_discovery_indication(struct lap_cb *self,
383 */ 383 */
384void irlmp_link_discovery_confirm(struct lap_cb *self, hashbin_t *log) 384void irlmp_link_discovery_confirm(struct lap_cb *self, hashbin_t *log)
385{ 385{
386 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 386 IRDA_DEBUG(4, "%s()\n", __func__);
387 387
388 IRDA_ASSERT(self != NULL, return;); 388 IRDA_ASSERT(self != NULL, return;);
389 IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); 389 IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
diff --git a/net/irda/irmod.c b/net/irda/irmod.c
index 01554b996b9b..4c487a883725 100644
--- a/net/irda/irmod.c
+++ b/net/irda/irmod.c
@@ -90,7 +90,7 @@ static int __init irda_init(void)
90{ 90{
91 int ret = 0; 91 int ret = 0;
92 92
93 IRDA_DEBUG(0, "%s()\n", __FUNCTION__); 93 IRDA_DEBUG(0, "%s()\n", __func__);
94 94
95 /* Lower layer of the stack */ 95 /* Lower layer of the stack */
96 irlmp_init(); 96 irlmp_init();
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h
index 7873c392ab4c..b001c361ad30 100644
--- a/net/irda/irnet/irnet.h
+++ b/net/irda/irnet/irnet.h
@@ -337,27 +337,27 @@
337/* All error messages (will show up in the normal logs) */ 337/* All error messages (will show up in the normal logs) */
338#define DERROR(dbg, format, args...) \ 338#define DERROR(dbg, format, args...) \
339 {if(DEBUG_##dbg) \ 339 {if(DEBUG_##dbg) \
340 printk(KERN_INFO "irnet: %s(): " format, __FUNCTION__ , ##args);} 340 printk(KERN_INFO "irnet: %s(): " format, __func__ , ##args);}
341 341
342/* Normal debug message (will show up in /var/log/debug) */ 342/* Normal debug message (will show up in /var/log/debug) */
343#define DEBUG(dbg, format, args...) \ 343#define DEBUG(dbg, format, args...) \
344 {if(DEBUG_##dbg) \ 344 {if(DEBUG_##dbg) \
345 printk(KERN_DEBUG "irnet: %s(): " format, __FUNCTION__ , ##args);} 345 printk(KERN_DEBUG "irnet: %s(): " format, __func__ , ##args);}
346 346
347/* Entering a function (trace) */ 347/* Entering a function (trace) */
348#define DENTER(dbg, format, args...) \ 348#define DENTER(dbg, format, args...) \
349 {if(DEBUG_##dbg) \ 349 {if(DEBUG_##dbg) \
350 printk(KERN_DEBUG "irnet: -> %s" format, __FUNCTION__ , ##args);} 350 printk(KERN_DEBUG "irnet: -> %s" format, __func__ , ##args);}
351 351
352/* Entering and exiting a function in one go (trace) */ 352/* Entering and exiting a function in one go (trace) */
353#define DPASS(dbg, format, args...) \ 353#define DPASS(dbg, format, args...) \
354 {if(DEBUG_##dbg) \ 354 {if(DEBUG_##dbg) \
355 printk(KERN_DEBUG "irnet: <>%s" format, __FUNCTION__ , ##args);} 355 printk(KERN_DEBUG "irnet: <>%s" format, __func__ , ##args);}
356 356
357/* Exiting a function (trace) */ 357/* Exiting a function (trace) */
358#define DEXIT(dbg, format, args...) \ 358#define DEXIT(dbg, format, args...) \
359 {if(DEBUG_##dbg) \ 359 {if(DEBUG_##dbg) \
360 printk(KERN_DEBUG "irnet: <-%s()" format, __FUNCTION__ , ##args);} 360 printk(KERN_DEBUG "irnet: <-%s()" format, __func__ , ##args);}
361 361
362/* Exit a function with debug */ 362/* Exit a function with debug */
363#define DRETURN(ret, dbg, args...) \ 363#define DRETURN(ret, dbg, args...) \
diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c
index cd9ff176ecde..9e1fb82e3220 100644
--- a/net/irda/irnetlink.c
+++ b/net/irda/irnetlink.c
@@ -40,7 +40,7 @@ static struct net_device * ifname_to_netdev(struct net *net, struct genl_info *i
40 40
41 ifname = nla_data(info->attrs[IRDA_NL_ATTR_IFNAME]); 41 ifname = nla_data(info->attrs[IRDA_NL_ATTR_IFNAME]);
42 42
43 IRDA_DEBUG(5, "%s(): Looking for %s\n", __FUNCTION__, ifname); 43 IRDA_DEBUG(5, "%s(): Looking for %s\n", __func__, ifname);
44 44
45 return dev_get_by_name(net, ifname); 45 return dev_get_by_name(net, ifname);
46} 46}
@@ -56,7 +56,7 @@ static int irda_nl_set_mode(struct sk_buff *skb, struct genl_info *info)
56 56
57 mode = nla_get_u32(info->attrs[IRDA_NL_ATTR_MODE]); 57 mode = nla_get_u32(info->attrs[IRDA_NL_ATTR_MODE]);
58 58
59 IRDA_DEBUG(5, "%s(): Switching to mode: %d\n", __FUNCTION__, mode); 59 IRDA_DEBUG(5, "%s(): Switching to mode: %d\n", __func__, mode);
60 60
61 dev = ifname_to_netdev(&init_net, info); 61 dev = ifname_to_netdev(&init_net, info);
62 if (!dev) 62 if (!dev)
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index 40c28efaed95..ba01938becb5 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -232,7 +232,7 @@ static __u32 hash( const char* name)
232static void enqueue_first(irda_queue_t **queue, irda_queue_t* element) 232static void enqueue_first(irda_queue_t **queue, irda_queue_t* element)
233{ 233{
234 234
235 IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); 235 IRDA_DEBUG( 4, "%s()\n", __func__);
236 236
237 /* 237 /*
238 * Check if queue is empty. 238 * Check if queue is empty.
@@ -451,7 +451,7 @@ void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv,
451 unsigned long flags = 0; 451 unsigned long flags = 0;
452 int bin; 452 int bin;
453 453
454 IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); 454 IRDA_DEBUG( 4, "%s()\n", __func__);
455 455
456 IRDA_ASSERT( hashbin != NULL, return;); 456 IRDA_ASSERT( hashbin != NULL, return;);
457 IRDA_ASSERT( hashbin->magic == HB_MAGIC, return;); 457 IRDA_ASSERT( hashbin->magic == HB_MAGIC, return;);
@@ -564,7 +564,7 @@ void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name)
564 unsigned long flags = 0; 564 unsigned long flags = 0;
565 irda_queue_t* entry; 565 irda_queue_t* entry;
566 566
567 IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); 567 IRDA_DEBUG( 4, "%s()\n", __func__);
568 568
569 IRDA_ASSERT( hashbin != NULL, return NULL;); 569 IRDA_ASSERT( hashbin != NULL, return NULL;);
570 IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); 570 IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
@@ -657,7 +657,7 @@ void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
657 int bin; 657 int bin;
658 long hashv; 658 long hashv;
659 659
660 IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); 660 IRDA_DEBUG( 4, "%s()\n", __func__);
661 661
662 IRDA_ASSERT( hashbin != NULL, return NULL;); 662 IRDA_ASSERT( hashbin != NULL, return NULL;);
663 IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); 663 IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 97db158c9274..74e439e80823 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -95,7 +95,7 @@ int __init irttp_init(void)
95 irttp->tsaps = hashbin_new(HB_LOCK); 95 irttp->tsaps = hashbin_new(HB_LOCK);
96 if (!irttp->tsaps) { 96 if (!irttp->tsaps) {
97 IRDA_ERROR("%s: can't allocate IrTTP hashbin!\n", 97 IRDA_ERROR("%s: can't allocate IrTTP hashbin!\n",
98 __FUNCTION__); 98 __func__);
99 kfree(irttp); 99 kfree(irttp);
100 return -ENOMEM; 100 return -ENOMEM;
101 } 101 }
@@ -164,7 +164,7 @@ static void irttp_todo_expired(unsigned long data)
164 if (!self || self->magic != TTP_TSAP_MAGIC) 164 if (!self || self->magic != TTP_TSAP_MAGIC)
165 return; 165 return;
166 166
167 IRDA_DEBUG(4, "%s(instance=%p)\n", __FUNCTION__, self); 167 IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self);
168 168
169 /* Try to make some progress, especially on Tx side - Jean II */ 169 /* Try to make some progress, especially on Tx side - Jean II */
170 irttp_run_rx_queue(self); 170 irttp_run_rx_queue(self);
@@ -205,7 +205,7 @@ void irttp_flush_queues(struct tsap_cb *self)
205{ 205{
206 struct sk_buff* skb; 206 struct sk_buff* skb;
207 207
208 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 208 IRDA_DEBUG(4, "%s()\n", __func__);
209 209
210 IRDA_ASSERT(self != NULL, return;); 210 IRDA_ASSERT(self != NULL, return;);
211 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); 211 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
@@ -238,7 +238,7 @@ static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self)
238 IRDA_ASSERT(self != NULL, return NULL;); 238 IRDA_ASSERT(self != NULL, return NULL;);
239 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return NULL;); 239 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return NULL;);
240 240
241 IRDA_DEBUG(2, "%s(), self->rx_sdu_size=%d\n", __FUNCTION__, 241 IRDA_DEBUG(2, "%s(), self->rx_sdu_size=%d\n", __func__,
242 self->rx_sdu_size); 242 self->rx_sdu_size);
243 243
244 skb = dev_alloc_skb(TTP_HEADER + self->rx_sdu_size); 244 skb = dev_alloc_skb(TTP_HEADER + self->rx_sdu_size);
@@ -264,7 +264,7 @@ static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self)
264 264
265 IRDA_DEBUG(2, 265 IRDA_DEBUG(2,
266 "%s(), frame len=%d, rx_sdu_size=%d, rx_max_sdu_size=%d\n", 266 "%s(), frame len=%d, rx_sdu_size=%d, rx_max_sdu_size=%d\n",
267 __FUNCTION__, n, self->rx_sdu_size, self->rx_max_sdu_size); 267 __func__, n, self->rx_sdu_size, self->rx_max_sdu_size);
268 /* Note : irttp_run_rx_queue() calculate self->rx_sdu_size 268 /* Note : irttp_run_rx_queue() calculate self->rx_sdu_size
269 * by summing the size of all fragments, so we should always 269 * by summing the size of all fragments, so we should always
270 * have n == self->rx_sdu_size, except in cases where we 270 * have n == self->rx_sdu_size, except in cases where we
@@ -293,7 +293,7 @@ static inline void irttp_fragment_skb(struct tsap_cb *self,
293 struct sk_buff *frag; 293 struct sk_buff *frag;
294 __u8 *frame; 294 __u8 *frame;
295 295
296 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 296 IRDA_DEBUG(2, "%s()\n", __func__);
297 297
298 IRDA_ASSERT(self != NULL, return;); 298 IRDA_ASSERT(self != NULL, return;);
299 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); 299 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
@@ -303,7 +303,7 @@ static inline void irttp_fragment_skb(struct tsap_cb *self,
303 * Split frame into a number of segments 303 * Split frame into a number of segments
304 */ 304 */
305 while (skb->len > self->max_seg_size) { 305 while (skb->len > self->max_seg_size) {
306 IRDA_DEBUG(2, "%s(), fragmenting ...\n", __FUNCTION__); 306 IRDA_DEBUG(2, "%s(), fragmenting ...\n", __func__);
307 307
308 /* Make new segment */ 308 /* Make new segment */
309 frag = alloc_skb(self->max_seg_size+self->max_header_size, 309 frag = alloc_skb(self->max_seg_size+self->max_header_size,
@@ -328,7 +328,7 @@ static inline void irttp_fragment_skb(struct tsap_cb *self,
328 skb_queue_tail(&self->tx_queue, frag); 328 skb_queue_tail(&self->tx_queue, frag);
329 } 329 }
330 /* Queue what is left of the original skb */ 330 /* Queue what is left of the original skb */
331 IRDA_DEBUG(2, "%s(), queuing last segment\n", __FUNCTION__); 331 IRDA_DEBUG(2, "%s(), queuing last segment\n", __func__);
332 332
333 frame = skb_push(skb, TTP_HEADER); 333 frame = skb_push(skb, TTP_HEADER);
334 frame[0] = 0x00; /* Clear more bit */ 334 frame[0] = 0x00; /* Clear more bit */
@@ -359,7 +359,7 @@ static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
359 else 359 else
360 self->tx_max_sdu_size = param->pv.i; 360 self->tx_max_sdu_size = param->pv.i;
361 361
362 IRDA_DEBUG(1, "%s(), MaxSduSize=%d\n", __FUNCTION__, param->pv.i); 362 IRDA_DEBUG(1, "%s(), MaxSduSize=%d\n", __func__, param->pv.i);
363 363
364 return 0; 364 return 0;
365} 365}
@@ -400,13 +400,13 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
400 * JeanII */ 400 * JeanII */
401 if((stsap_sel != LSAP_ANY) && 401 if((stsap_sel != LSAP_ANY) &&
402 ((stsap_sel < 0x01) || (stsap_sel >= 0x70))) { 402 ((stsap_sel < 0x01) || (stsap_sel >= 0x70))) {
403 IRDA_DEBUG(0, "%s(), invalid tsap!\n", __FUNCTION__); 403 IRDA_DEBUG(0, "%s(), invalid tsap!\n", __func__);
404 return NULL; 404 return NULL;
405 } 405 }
406 406
407 self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC); 407 self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
408 if (self == NULL) { 408 if (self == NULL) {
409 IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __FUNCTION__); 409 IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __func__);
410 return NULL; 410 return NULL;
411 } 411 }
412 412
@@ -438,7 +438,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
438 */ 438 */
439 lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0); 439 lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0);
440 if (lsap == NULL) { 440 if (lsap == NULL) {
441 IRDA_WARNING("%s: unable to allocate LSAP!!\n", __FUNCTION__); 441 IRDA_WARNING("%s: unable to allocate LSAP!!\n", __func__);
442 return NULL; 442 return NULL;
443 } 443 }
444 444
@@ -448,7 +448,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
448 * the stsap_sel we have might not be valid anymore 448 * the stsap_sel we have might not be valid anymore
449 */ 449 */
450 self->stsap_sel = lsap->slsap_sel; 450 self->stsap_sel = lsap->slsap_sel;
451 IRDA_DEBUG(4, "%s(), stsap_sel=%02x\n", __FUNCTION__, self->stsap_sel); 451 IRDA_DEBUG(4, "%s(), stsap_sel=%02x\n", __func__, self->stsap_sel);
452 452
453 self->notify = *notify; 453 self->notify = *notify;
454 self->lsap = lsap; 454 self->lsap = lsap;
@@ -506,7 +506,7 @@ int irttp_close_tsap(struct tsap_cb *self)
506{ 506{
507 struct tsap_cb *tsap; 507 struct tsap_cb *tsap;
508 508
509 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 509 IRDA_DEBUG(4, "%s()\n", __func__);
510 510
511 IRDA_ASSERT(self != NULL, return -1;); 511 IRDA_ASSERT(self != NULL, return -1;);
512 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); 512 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
@@ -516,7 +516,7 @@ int irttp_close_tsap(struct tsap_cb *self)
516 /* Check if disconnect is not pending */ 516 /* Check if disconnect is not pending */
517 if (!test_bit(0, &self->disconnect_pend)) { 517 if (!test_bit(0, &self->disconnect_pend)) {
518 IRDA_WARNING("%s: TSAP still connected!\n", 518 IRDA_WARNING("%s: TSAP still connected!\n",
519 __FUNCTION__); 519 __func__);
520 irttp_disconnect_request(self, NULL, P_NORMAL); 520 irttp_disconnect_request(self, NULL, P_NORMAL);
521 } 521 }
522 self->close_pend = TRUE; 522 self->close_pend = TRUE;
@@ -553,18 +553,18 @@ int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
553 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); 553 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
554 IRDA_ASSERT(skb != NULL, return -1;); 554 IRDA_ASSERT(skb != NULL, return -1;);
555 555
556 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 556 IRDA_DEBUG(4, "%s()\n", __func__);
557 557
558 /* Check that nothing bad happens */ 558 /* Check that nothing bad happens */
559 if ((skb->len == 0) || (!self->connected)) { 559 if ((skb->len == 0) || (!self->connected)) {
560 IRDA_DEBUG(1, "%s(), No data, or not connected\n", 560 IRDA_DEBUG(1, "%s(), No data, or not connected\n",
561 __FUNCTION__); 561 __func__);
562 goto err; 562 goto err;
563 } 563 }
564 564
565 if (skb->len > self->max_seg_size) { 565 if (skb->len > self->max_seg_size) {
566 IRDA_DEBUG(1, "%s(), UData is too large for IrLAP!\n", 566 IRDA_DEBUG(1, "%s(), UData is too large for IrLAP!\n",
567 __FUNCTION__); 567 __func__);
568 goto err; 568 goto err;
569 } 569 }
570 570
@@ -595,12 +595,12 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
595 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); 595 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
596 IRDA_ASSERT(skb != NULL, return -1;); 596 IRDA_ASSERT(skb != NULL, return -1;);
597 597
598 IRDA_DEBUG(2, "%s() : queue len = %d\n", __FUNCTION__, 598 IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__,
599 skb_queue_len(&self->tx_queue)); 599 skb_queue_len(&self->tx_queue));
600 600
601 /* Check that nothing bad happens */ 601 /* Check that nothing bad happens */
602 if ((skb->len == 0) || (!self->connected)) { 602 if ((skb->len == 0) || (!self->connected)) {
603 IRDA_WARNING("%s: No data, or not connected\n", __FUNCTION__); 603 IRDA_WARNING("%s: No data, or not connected\n", __func__);
604 ret = -ENOTCONN; 604 ret = -ENOTCONN;
605 goto err; 605 goto err;
606 } 606 }
@@ -611,7 +611,7 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
611 */ 611 */
612 if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) { 612 if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) {
613 IRDA_ERROR("%s: SAR disabled, and data is too large for IrLAP!\n", 613 IRDA_ERROR("%s: SAR disabled, and data is too large for IrLAP!\n",
614 __FUNCTION__); 614 __func__);
615 ret = -EMSGSIZE; 615 ret = -EMSGSIZE;
616 goto err; 616 goto err;
617 } 617 }
@@ -625,7 +625,7 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
625 (skb->len > self->tx_max_sdu_size)) 625 (skb->len > self->tx_max_sdu_size))
626 { 626 {
627 IRDA_ERROR("%s: SAR enabled, but data is larger than TxMaxSduSize!\n", 627 IRDA_ERROR("%s: SAR enabled, but data is larger than TxMaxSduSize!\n",
628 __FUNCTION__); 628 __func__);
629 ret = -EMSGSIZE; 629 ret = -EMSGSIZE;
630 goto err; 630 goto err;
631 } 631 }
@@ -704,7 +704,7 @@ static void irttp_run_tx_queue(struct tsap_cb *self)
704 int n; 704 int n;
705 705
706 IRDA_DEBUG(2, "%s() : send_credit = %d, queue_len = %d\n", 706 IRDA_DEBUG(2, "%s() : send_credit = %d, queue_len = %d\n",
707 __FUNCTION__, 707 __func__,
708 self->send_credit, skb_queue_len(&self->tx_queue)); 708 self->send_credit, skb_queue_len(&self->tx_queue));
709 709
710 /* Get exclusive access to the tx queue, otherwise don't touch it */ 710 /* Get exclusive access to the tx queue, otherwise don't touch it */
@@ -813,7 +813,7 @@ static inline void irttp_give_credit(struct tsap_cb *self)
813 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); 813 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
814 814
815 IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n", 815 IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n",
816 __FUNCTION__, 816 __func__,
817 self->send_credit, self->avail_credit, self->remote_credit); 817 self->send_credit, self->avail_credit, self->remote_credit);
818 818
819 /* Give credit to peer */ 819 /* Give credit to peer */
@@ -862,7 +862,7 @@ static int irttp_udata_indication(void *instance, void *sap,
862 struct tsap_cb *self; 862 struct tsap_cb *self;
863 int err; 863 int err;
864 864
865 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 865 IRDA_DEBUG(4, "%s()\n", __func__);
866 866
867 self = (struct tsap_cb *) instance; 867 self = (struct tsap_cb *) instance;
868 868
@@ -979,7 +979,7 @@ static void irttp_status_indication(void *instance,
979{ 979{
980 struct tsap_cb *self; 980 struct tsap_cb *self;
981 981
982 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 982 IRDA_DEBUG(4, "%s()\n", __func__);
983 983
984 self = (struct tsap_cb *) instance; 984 self = (struct tsap_cb *) instance;
985 985
@@ -997,7 +997,7 @@ static void irttp_status_indication(void *instance,
997 self->notify.status_indication(self->notify.instance, 997 self->notify.status_indication(self->notify.instance,
998 link, lock); 998 link, lock);
999 else 999 else
1000 IRDA_DEBUG(2, "%s(), no handler\n", __FUNCTION__); 1000 IRDA_DEBUG(2, "%s(), no handler\n", __func__);
1001} 1001}
1002 1002
1003/* 1003/*
@@ -1015,7 +1015,7 @@ static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
1015 IRDA_ASSERT(self != NULL, return;); 1015 IRDA_ASSERT(self != NULL, return;);
1016 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); 1016 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
1017 1017
1018 IRDA_DEBUG(4, "%s(instance=%p)\n", __FUNCTION__, self); 1018 IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self);
1019 1019
1020 /* We are "polled" directly from LAP, and the LAP want to fill 1020 /* We are "polled" directly from LAP, and the LAP want to fill
1021 * its Tx window. We want to do our best to send it data, so that 1021 * its Tx window. We want to do our best to send it data, so that
@@ -1053,18 +1053,18 @@ static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
1053 */ 1053 */
1054void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow) 1054void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow)
1055{ 1055{
1056 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 1056 IRDA_DEBUG(1, "%s()\n", __func__);
1057 1057
1058 IRDA_ASSERT(self != NULL, return;); 1058 IRDA_ASSERT(self != NULL, return;);
1059 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); 1059 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
1060 1060
1061 switch (flow) { 1061 switch (flow) {
1062 case FLOW_STOP: 1062 case FLOW_STOP:
1063 IRDA_DEBUG(1, "%s(), flow stop\n", __FUNCTION__); 1063 IRDA_DEBUG(1, "%s(), flow stop\n", __func__);
1064 self->rx_sdu_busy = TRUE; 1064 self->rx_sdu_busy = TRUE;
1065 break; 1065 break;
1066 case FLOW_START: 1066 case FLOW_START:
1067 IRDA_DEBUG(1, "%s(), flow start\n", __FUNCTION__); 1067 IRDA_DEBUG(1, "%s(), flow start\n", __func__);
1068 self->rx_sdu_busy = FALSE; 1068 self->rx_sdu_busy = FALSE;
1069 1069
1070 /* Client say he can accept more data, try to free our 1070 /* Client say he can accept more data, try to free our
@@ -1073,7 +1073,7 @@ void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow)
1073 1073
1074 break; 1074 break;
1075 default: 1075 default:
1076 IRDA_DEBUG(1, "%s(), Unknown flow command!\n", __FUNCTION__); 1076 IRDA_DEBUG(1, "%s(), Unknown flow command!\n", __func__);
1077 } 1077 }
1078} 1078}
1079EXPORT_SYMBOL(irttp_flow_request); 1079EXPORT_SYMBOL(irttp_flow_request);
@@ -1093,7 +1093,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
1093 __u8 *frame; 1093 __u8 *frame;
1094 __u8 n; 1094 __u8 n;
1095 1095
1096 IRDA_DEBUG(4, "%s(), max_sdu_size=%d\n", __FUNCTION__, max_sdu_size); 1096 IRDA_DEBUG(4, "%s(), max_sdu_size=%d\n", __func__, max_sdu_size);
1097 1097
1098 IRDA_ASSERT(self != NULL, return -EBADR;); 1098 IRDA_ASSERT(self != NULL, return -EBADR;);
1099 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;); 1099 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;);
@@ -1191,7 +1191,7 @@ static void irttp_connect_confirm(void *instance, void *sap,
1191 __u8 plen; 1191 __u8 plen;
1192 __u8 n; 1192 __u8 n;
1193 1193
1194 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 1194 IRDA_DEBUG(4, "%s()\n", __func__);
1195 1195
1196 self = (struct tsap_cb *) instance; 1196 self = (struct tsap_cb *) instance;
1197 1197
@@ -1215,7 +1215,7 @@ static void irttp_connect_confirm(void *instance, void *sap,
1215 1215
1216 n = skb->data[0] & 0x7f; 1216 n = skb->data[0] & 0x7f;
1217 1217
1218 IRDA_DEBUG(4, "%s(), Initial send_credit=%d\n", __FUNCTION__, n); 1218 IRDA_DEBUG(4, "%s(), Initial send_credit=%d\n", __func__, n);
1219 1219
1220 self->send_credit = n; 1220 self->send_credit = n;
1221 self->tx_max_sdu_size = 0; 1221 self->tx_max_sdu_size = 0;
@@ -1236,7 +1236,7 @@ static void irttp_connect_confirm(void *instance, void *sap,
1236 /* Any errors in the parameter list? */ 1236 /* Any errors in the parameter list? */
1237 if (ret < 0) { 1237 if (ret < 0) {
1238 IRDA_WARNING("%s: error extracting parameters\n", 1238 IRDA_WARNING("%s: error extracting parameters\n",
1239 __FUNCTION__); 1239 __func__);
1240 dev_kfree_skb(skb); 1240 dev_kfree_skb(skb);
1241 1241
1242 /* Do not accept this connection attempt */ 1242 /* Do not accept this connection attempt */
@@ -1246,10 +1246,10 @@ static void irttp_connect_confirm(void *instance, void *sap,
1246 skb_pull(skb, IRDA_MIN(skb->len, plen+1)); 1246 skb_pull(skb, IRDA_MIN(skb->len, plen+1));
1247 } 1247 }
1248 1248
1249 IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n", __FUNCTION__, 1249 IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n", __func__,
1250 self->send_credit, self->avail_credit, self->remote_credit); 1250 self->send_credit, self->avail_credit, self->remote_credit);
1251 1251
1252 IRDA_DEBUG(2, "%s(), MaxSduSize=%d\n", __FUNCTION__, 1252 IRDA_DEBUG(2, "%s(), MaxSduSize=%d\n", __func__,
1253 self->tx_max_sdu_size); 1253 self->tx_max_sdu_size);
1254 1254
1255 if (self->notify.connect_confirm) { 1255 if (self->notify.connect_confirm) {
@@ -1288,7 +1288,7 @@ void irttp_connect_indication(void *instance, void *sap, struct qos_info *qos,
1288 self->max_seg_size = max_seg_size - TTP_HEADER; 1288 self->max_seg_size = max_seg_size - TTP_HEADER;
1289 self->max_header_size = max_header_size+TTP_HEADER; 1289 self->max_header_size = max_header_size+TTP_HEADER;
1290 1290
1291 IRDA_DEBUG(4, "%s(), TSAP sel=%02x\n", __FUNCTION__, self->stsap_sel); 1291 IRDA_DEBUG(4, "%s(), TSAP sel=%02x\n", __func__, self->stsap_sel);
1292 1292
1293 /* Need to update dtsap_sel if its equal to LSAP_ANY */ 1293 /* Need to update dtsap_sel if its equal to LSAP_ANY */
1294 self->dtsap_sel = lsap->dlsap_sel; 1294 self->dtsap_sel = lsap->dlsap_sel;
@@ -1313,7 +1313,7 @@ void irttp_connect_indication(void *instance, void *sap, struct qos_info *qos,
1313 /* Any errors in the parameter list? */ 1313 /* Any errors in the parameter list? */
1314 if (ret < 0) { 1314 if (ret < 0) {
1315 IRDA_WARNING("%s: error extracting parameters\n", 1315 IRDA_WARNING("%s: error extracting parameters\n",
1316 __FUNCTION__); 1316 __func__);
1317 dev_kfree_skb(skb); 1317 dev_kfree_skb(skb);
1318 1318
1319 /* Do not accept this connection attempt */ 1319 /* Do not accept this connection attempt */
@@ -1350,7 +1350,7 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
1350 IRDA_ASSERT(self != NULL, return -1;); 1350 IRDA_ASSERT(self != NULL, return -1;);
1351 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); 1351 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
1352 1352
1353 IRDA_DEBUG(4, "%s(), Source TSAP selector=%02x\n", __FUNCTION__, 1353 IRDA_DEBUG(4, "%s(), Source TSAP selector=%02x\n", __func__,
1354 self->stsap_sel); 1354 self->stsap_sel);
1355 1355
1356 /* Any userdata supplied? */ 1356 /* Any userdata supplied? */
@@ -1432,14 +1432,14 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
1432 struct tsap_cb *new; 1432 struct tsap_cb *new;
1433 unsigned long flags; 1433 unsigned long flags;
1434 1434
1435 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 1435 IRDA_DEBUG(1, "%s()\n", __func__);
1436 1436
1437 /* Protect our access to the old tsap instance */ 1437 /* Protect our access to the old tsap instance */
1438 spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags); 1438 spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags);
1439 1439
1440 /* Find the old instance */ 1440 /* Find the old instance */
1441 if (!hashbin_find(irttp->tsaps, (long) orig, NULL)) { 1441 if (!hashbin_find(irttp->tsaps, (long) orig, NULL)) {
1442 IRDA_DEBUG(0, "%s(), unable to find TSAP\n", __FUNCTION__); 1442 IRDA_DEBUG(0, "%s(), unable to find TSAP\n", __func__);
1443 spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags); 1443 spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
1444 return NULL; 1444 return NULL;
1445 } 1445 }
@@ -1447,7 +1447,7 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
1447 /* Allocate a new instance */ 1447 /* Allocate a new instance */
1448 new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC); 1448 new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
1449 if (!new) { 1449 if (!new) {
1450 IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __FUNCTION__); 1450 IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__);
1451 spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags); 1451 spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
1452 return NULL; 1452 return NULL;
1453 } 1453 }
@@ -1460,7 +1460,7 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
1460 /* Try to dup the LSAP (may fail if we were too slow) */ 1460 /* Try to dup the LSAP (may fail if we were too slow) */
1461 new->lsap = irlmp_dup(orig->lsap, new); 1461 new->lsap = irlmp_dup(orig->lsap, new);
1462 if (!new->lsap) { 1462 if (!new->lsap) {
1463 IRDA_DEBUG(0, "%s(), dup failed!\n", __FUNCTION__); 1463 IRDA_DEBUG(0, "%s(), dup failed!\n", __func__);
1464 kfree(new); 1464 kfree(new);
1465 return NULL; 1465 return NULL;
1466 } 1466 }
@@ -1495,7 +1495,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
1495 1495
1496 /* Already disconnected? */ 1496 /* Already disconnected? */
1497 if (!self->connected) { 1497 if (!self->connected) {
1498 IRDA_DEBUG(4, "%s(), already disconnected!\n", __FUNCTION__); 1498 IRDA_DEBUG(4, "%s(), already disconnected!\n", __func__);
1499 if (userdata) 1499 if (userdata)
1500 dev_kfree_skb(userdata); 1500 dev_kfree_skb(userdata);
1501 return -1; 1501 return -1;
@@ -1508,7 +1508,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
1508 * Jean II */ 1508 * Jean II */
1509 if(test_and_set_bit(0, &self->disconnect_pend)) { 1509 if(test_and_set_bit(0, &self->disconnect_pend)) {
1510 IRDA_DEBUG(0, "%s(), disconnect already pending\n", 1510 IRDA_DEBUG(0, "%s(), disconnect already pending\n",
1511 __FUNCTION__); 1511 __func__);
1512 if (userdata) 1512 if (userdata)
1513 dev_kfree_skb(userdata); 1513 dev_kfree_skb(userdata);
1514 1514
@@ -1527,7 +1527,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
1527 * disconnecting right now since the data will 1527 * disconnecting right now since the data will
1528 * not have any usable connection to be sent on 1528 * not have any usable connection to be sent on
1529 */ 1529 */
1530 IRDA_DEBUG(1, "%s(): High priority!!()\n", __FUNCTION__); 1530 IRDA_DEBUG(1, "%s(): High priority!!()\n", __func__);
1531 irttp_flush_queues(self); 1531 irttp_flush_queues(self);
1532 } else if (priority == P_NORMAL) { 1532 } else if (priority == P_NORMAL) {
1533 /* 1533 /*
@@ -1548,7 +1548,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
1548 * be sent at the LMP level (so even if the peer has its Tx queue 1548 * be sent at the LMP level (so even if the peer has its Tx queue
1549 * full of data). - Jean II */ 1549 * full of data). - Jean II */
1550 1550
1551 IRDA_DEBUG(1, "%s(), Disconnecting ...\n", __FUNCTION__); 1551 IRDA_DEBUG(1, "%s(), Disconnecting ...\n", __func__);
1552 self->connected = FALSE; 1552 self->connected = FALSE;
1553 1553
1554 if (!userdata) { 1554 if (!userdata) {
@@ -1584,7 +1584,7 @@ void irttp_disconnect_indication(void *instance, void *sap, LM_REASON reason,
1584{ 1584{
1585 struct tsap_cb *self; 1585 struct tsap_cb *self;
1586 1586
1587 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 1587 IRDA_DEBUG(4, "%s()\n", __func__);
1588 1588
1589 self = (struct tsap_cb *) instance; 1589 self = (struct tsap_cb *) instance;
1590 1590
@@ -1644,7 +1644,7 @@ static void irttp_do_data_indication(struct tsap_cb *self, struct sk_buff *skb)
1644 * give an error back 1644 * give an error back
1645 */ 1645 */
1646 if (err) { 1646 if (err) {
1647 IRDA_DEBUG(0, "%s() requeueing skb!\n", __FUNCTION__); 1647 IRDA_DEBUG(0, "%s() requeueing skb!\n", __func__);
1648 1648
1649 /* Make sure we take a break */ 1649 /* Make sure we take a break */
1650 self->rx_sdu_busy = TRUE; 1650 self->rx_sdu_busy = TRUE;
@@ -1669,7 +1669,7 @@ void irttp_run_rx_queue(struct tsap_cb *self)
1669 struct sk_buff *skb; 1669 struct sk_buff *skb;
1670 int more = 0; 1670 int more = 0;
1671 1671
1672 IRDA_DEBUG(2, "%s() send=%d,avail=%d,remote=%d\n", __FUNCTION__, 1672 IRDA_DEBUG(2, "%s() send=%d,avail=%d,remote=%d\n", __func__,
1673 self->send_credit, self->avail_credit, self->remote_credit); 1673 self->send_credit, self->avail_credit, self->remote_credit);
1674 1674
1675 /* Get exclusive access to the rx queue, otherwise don't touch it */ 1675 /* Get exclusive access to the rx queue, otherwise don't touch it */
@@ -1710,7 +1710,7 @@ void irttp_run_rx_queue(struct tsap_cb *self)
1710 */ 1710 */
1711 if (self->rx_sdu_size <= self->rx_max_sdu_size) { 1711 if (self->rx_sdu_size <= self->rx_max_sdu_size) {
1712 IRDA_DEBUG(4, "%s(), queueing frag\n", 1712 IRDA_DEBUG(4, "%s(), queueing frag\n",
1713 __FUNCTION__); 1713 __func__);
1714 skb_queue_tail(&self->rx_fragments, skb); 1714 skb_queue_tail(&self->rx_fragments, skb);
1715 } else { 1715 } else {
1716 /* Free the part of the SDU that is too big */ 1716 /* Free the part of the SDU that is too big */
@@ -1740,7 +1740,7 @@ void irttp_run_rx_queue(struct tsap_cb *self)
1740 /* Now we can deliver the reassembled skb */ 1740 /* Now we can deliver the reassembled skb */
1741 irttp_do_data_indication(self, skb); 1741 irttp_do_data_indication(self, skb);
1742 } else { 1742 } else {
1743 IRDA_DEBUG(1, "%s(), Truncated frame\n", __FUNCTION__); 1743 IRDA_DEBUG(1, "%s(), Truncated frame\n", __func__);
1744 1744
1745 /* Free the part of the SDU that is too big */ 1745 /* Free the part of the SDU that is too big */
1746 dev_kfree_skb(skb); 1746 dev_kfree_skb(skb);
diff --git a/net/irda/parameters.c b/net/irda/parameters.c
index 722bbe044d9c..fc1a20565e2d 100644
--- a/net/irda/parameters.c
+++ b/net/irda/parameters.c
@@ -148,23 +148,23 @@ static int irda_insert_integer(void *self, __u8 *buf, int len, __u8 pi,
148 */ 148 */
149 if (p.pl == 0) { 149 if (p.pl == 0) {
150 if (p.pv.i < 0xff) { 150 if (p.pv.i < 0xff) {
151 IRDA_DEBUG(2, "%s(), using 1 byte\n", __FUNCTION__); 151 IRDA_DEBUG(2, "%s(), using 1 byte\n", __func__);
152 p.pl = 1; 152 p.pl = 1;
153 } else if (p.pv.i < 0xffff) { 153 } else if (p.pv.i < 0xffff) {
154 IRDA_DEBUG(2, "%s(), using 2 bytes\n", __FUNCTION__); 154 IRDA_DEBUG(2, "%s(), using 2 bytes\n", __func__);
155 p.pl = 2; 155 p.pl = 2;
156 } else { 156 } else {
157 IRDA_DEBUG(2, "%s(), using 4 bytes\n", __FUNCTION__); 157 IRDA_DEBUG(2, "%s(), using 4 bytes\n", __func__);
158 p.pl = 4; /* Default length */ 158 p.pl = 4; /* Default length */
159 } 159 }
160 } 160 }
161 /* Check if buffer is long enough for insertion */ 161 /* Check if buffer is long enough for insertion */
162 if (len < (2+p.pl)) { 162 if (len < (2+p.pl)) {
163 IRDA_WARNING("%s: buffer too short for insertion!\n", 163 IRDA_WARNING("%s: buffer too short for insertion!\n",
164 __FUNCTION__); 164 __func__);
165 return -1; 165 return -1;
166 } 166 }
167 IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d, pi=%d\n", __FUNCTION__, 167 IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d, pi=%d\n", __func__,
168 p.pi, p.pl, p.pv.i); 168 p.pi, p.pl, p.pv.i);
169 switch (p.pl) { 169 switch (p.pl) {
170 case 1: 170 case 1:
@@ -187,7 +187,7 @@ static int irda_insert_integer(void *self, __u8 *buf, int len, __u8 pi,
187 break; 187 break;
188 default: 188 default:
189 IRDA_WARNING("%s: length %d not supported\n", 189 IRDA_WARNING("%s: length %d not supported\n",
190 __FUNCTION__, p.pl); 190 __func__, p.pl);
191 /* Skip parameter */ 191 /* Skip parameter */
192 return -1; 192 return -1;
193 } 193 }
@@ -218,7 +218,7 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi,
218 if (len < (2+p.pl)) { 218 if (len < (2+p.pl)) {
219 IRDA_WARNING("%s: buffer too short for parsing! " 219 IRDA_WARNING("%s: buffer too short for parsing! "
220 "Need %d bytes, but len is only %d\n", 220 "Need %d bytes, but len is only %d\n",
221 __FUNCTION__, p.pl, len); 221 __func__, p.pl, len);
222 return -1; 222 return -1;
223 } 223 }
224 224
@@ -230,7 +230,7 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi,
230 if (((type & PV_MASK) != PV_INTEGER) && ((type & PV_MASK) != p.pl)) { 230 if (((type & PV_MASK) != PV_INTEGER) && ((type & PV_MASK) != p.pl)) {
231 IRDA_ERROR("%s: invalid parameter length! " 231 IRDA_ERROR("%s: invalid parameter length! "
232 "Expected %d bytes, but value had %d bytes!\n", 232 "Expected %d bytes, but value had %d bytes!\n",
233 __FUNCTION__, type & PV_MASK, p.pl); 233 __func__, type & PV_MASK, p.pl);
234 234
235 /* Most parameters are bit/byte fields or little endian, 235 /* Most parameters are bit/byte fields or little endian,
236 * so it's ok to only extract a subset of it (the subset 236 * so it's ok to only extract a subset of it (the subset
@@ -268,13 +268,13 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi,
268 break; 268 break;
269 default: 269 default:
270 IRDA_WARNING("%s: length %d not supported\n", 270 IRDA_WARNING("%s: length %d not supported\n",
271 __FUNCTION__, p.pl); 271 __func__, p.pl);
272 272
273 /* Skip parameter */ 273 /* Skip parameter */
274 return p.pl+2; 274 return p.pl+2;
275 } 275 }
276 276
277 IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d, pi=%d\n", __FUNCTION__, 277 IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d, pi=%d\n", __func__,
278 p.pi, p.pl, p.pv.i); 278 p.pi, p.pl, p.pv.i);
279 /* Call handler for this parameter */ 279 /* Call handler for this parameter */
280 err = (*func)(self, &p, PV_PUT); 280 err = (*func)(self, &p, PV_PUT);
@@ -294,19 +294,19 @@ static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi,
294 irda_param_t p; 294 irda_param_t p;
295 int err; 295 int err;
296 296
297 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 297 IRDA_DEBUG(2, "%s()\n", __func__);
298 298
299 p.pi = pi; /* In case handler needs to know */ 299 p.pi = pi; /* In case handler needs to know */
300 p.pl = buf[1]; /* Extract length of value */ 300 p.pl = buf[1]; /* Extract length of value */
301 301
302 IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d\n", __FUNCTION__, 302 IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d\n", __func__,
303 p.pi, p.pl); 303 p.pi, p.pl);
304 304
305 /* Check if buffer is long enough for parsing */ 305 /* Check if buffer is long enough for parsing */
306 if (len < (2+p.pl)) { 306 if (len < (2+p.pl)) {
307 IRDA_WARNING("%s: buffer too short for parsing! " 307 IRDA_WARNING("%s: buffer too short for parsing! "
308 "Need %d bytes, but len is only %d\n", 308 "Need %d bytes, but len is only %d\n",
309 __FUNCTION__, p.pl, len); 309 __func__, p.pl, len);
310 return -1; 310 return -1;
311 } 311 }
312 312
@@ -314,7 +314,7 @@ static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi,
314 * checked that the buffer is long enough */ 314 * checked that the buffer is long enough */
315 strncpy(str, buf+2, p.pl); 315 strncpy(str, buf+2, p.pl);
316 316
317 IRDA_DEBUG(2, "%s(), str=0x%02x 0x%02x\n", __FUNCTION__, 317 IRDA_DEBUG(2, "%s(), str=0x%02x 0x%02x\n", __func__,
318 (__u8) str[0], (__u8) str[1]); 318 (__u8) str[0], (__u8) str[1]);
319 319
320 /* Null terminate string */ 320 /* Null terminate string */
@@ -345,11 +345,11 @@ static int irda_extract_octseq(void *self, __u8 *buf, int len, __u8 pi,
345 if (len < (2+p.pl)) { 345 if (len < (2+p.pl)) {
346 IRDA_WARNING("%s: buffer too short for parsing! " 346 IRDA_WARNING("%s: buffer too short for parsing! "
347 "Need %d bytes, but len is only %d\n", 347 "Need %d bytes, but len is only %d\n",
348 __FUNCTION__, p.pl, len); 348 __func__, p.pl, len);
349 return -1; 349 return -1;
350 } 350 }
351 351
352 IRDA_DEBUG(0, "%s(), not impl\n", __FUNCTION__); 352 IRDA_DEBUG(0, "%s(), not impl\n", __func__);
353 353
354 return p.pl+2; /* Extracted pl+2 bytes */ 354 return p.pl+2; /* Extracted pl+2 bytes */
355} 355}
@@ -473,7 +473,7 @@ int irda_param_insert(void *self, __u8 pi, __u8 *buf, int len,
473 (pi_minor > info->tables[pi_major].len-1)) 473 (pi_minor > info->tables[pi_major].len-1))
474 { 474 {
475 IRDA_DEBUG(0, "%s(), no handler for parameter=0x%02x\n", 475 IRDA_DEBUG(0, "%s(), no handler for parameter=0x%02x\n",
476 __FUNCTION__, pi); 476 __func__, pi);
477 477
478 /* Skip this parameter */ 478 /* Skip this parameter */
479 return -1; 479 return -1;
@@ -487,7 +487,7 @@ int irda_param_insert(void *self, __u8 pi, __u8 *buf, int len,
487 487
488 /* Check if handler has been implemented */ 488 /* Check if handler has been implemented */
489 if (!pi_minor_info->func) { 489 if (!pi_minor_info->func) {
490 IRDA_MESSAGE("%s: no handler for pi=%#x\n", __FUNCTION__, pi); 490 IRDA_MESSAGE("%s: no handler for pi=%#x\n", __func__, pi);
491 /* Skip this parameter */ 491 /* Skip this parameter */
492 return -1; 492 return -1;
493 } 493 }
@@ -527,7 +527,7 @@ static int irda_param_extract(void *self, __u8 *buf, int len,
527 (pi_minor > info->tables[pi_major].len-1)) 527 (pi_minor > info->tables[pi_major].len-1))
528 { 528 {
529 IRDA_DEBUG(0, "%s(), no handler for parameter=0x%02x\n", 529 IRDA_DEBUG(0, "%s(), no handler for parameter=0x%02x\n",
530 __FUNCTION__, buf[0]); 530 __func__, buf[0]);
531 531
532 /* Skip this parameter */ 532 /* Skip this parameter */
533 return 2 + buf[n + 1]; /* Continue */ 533 return 2 + buf[n + 1]; /* Continue */
@@ -539,13 +539,13 @@ static int irda_param_extract(void *self, __u8 *buf, int len,
539 /* Find expected data type for this parameter identifier (pi)*/ 539 /* Find expected data type for this parameter identifier (pi)*/
540 type = pi_minor_info->type; 540 type = pi_minor_info->type;
541 541
542 IRDA_DEBUG(3, "%s(), pi=[%d,%d], type=%d\n", __FUNCTION__, 542 IRDA_DEBUG(3, "%s(), pi=[%d,%d], type=%d\n", __func__,
543 pi_major, pi_minor, type); 543 pi_major, pi_minor, type);
544 544
545 /* Check if handler has been implemented */ 545 /* Check if handler has been implemented */
546 if (!pi_minor_info->func) { 546 if (!pi_minor_info->func) {
547 IRDA_MESSAGE("%s: no handler for pi=%#x\n", 547 IRDA_MESSAGE("%s: no handler for pi=%#x\n",
548 __FUNCTION__, buf[n]); 548 __func__, buf[n]);
549 /* Skip this parameter */ 549 /* Skip this parameter */
550 return 2 + buf[n + 1]; /* Continue */ 550 return 2 + buf[n + 1]; /* Continue */
551 } 551 }
diff --git a/net/irda/qos.c b/net/irda/qos.c
index aeb18cf1dcae..2b00974e5bae 100644
--- a/net/irda/qos.c
+++ b/net/irda/qos.c
@@ -201,7 +201,7 @@ static int msb_index (__u16 word)
201 * it's very likely the peer. - Jean II */ 201 * it's very likely the peer. - Jean II */
202 if (word == 0) { 202 if (word == 0) {
203 IRDA_WARNING("%s(), Detected buggy peer, adjust null PV to 0x1!\n", 203 IRDA_WARNING("%s(), Detected buggy peer, adjust null PV to 0x1!\n",
204 __FUNCTION__); 204 __func__);
205 /* The only safe choice (we don't know the array size) */ 205 /* The only safe choice (we don't know the array size) */
206 word = 0x1; 206 word = 0x1;
207 } 207 }
@@ -342,7 +342,7 @@ static void irlap_adjust_qos_settings(struct qos_info *qos)
342 __u32 line_capacity; 342 __u32 line_capacity;
343 int index; 343 int index;
344 344
345 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 345 IRDA_DEBUG(2, "%s()\n", __func__);
346 346
347 /* 347 /*
348 * Make sure the mintt is sensible. 348 * Make sure the mintt is sensible.
@@ -352,7 +352,7 @@ static void irlap_adjust_qos_settings(struct qos_info *qos)
352 int i; 352 int i;
353 353
354 IRDA_WARNING("%s(), Detected buggy peer, adjust mtt to %dus!\n", 354 IRDA_WARNING("%s(), Detected buggy peer, adjust mtt to %dus!\n",
355 __FUNCTION__, sysctl_min_tx_turn_time); 355 __func__, sysctl_min_tx_turn_time);
356 356
357 /* We don't really need bits, but easier this way */ 357 /* We don't really need bits, but easier this way */
358 i = value_highest_bit(sysctl_min_tx_turn_time, min_turn_times, 358 i = value_highest_bit(sysctl_min_tx_turn_time, min_turn_times,
@@ -370,7 +370,7 @@ static void irlap_adjust_qos_settings(struct qos_info *qos)
370 { 370 {
371 IRDA_DEBUG(0, 371 IRDA_DEBUG(0,
372 "%s(), adjusting max turn time from %d to 500 ms\n", 372 "%s(), adjusting max turn time from %d to 500 ms\n",
373 __FUNCTION__, qos->max_turn_time.value); 373 __func__, qos->max_turn_time.value);
374 qos->max_turn_time.value = 500; 374 qos->max_turn_time.value = 500;
375 } 375 }
376 376
@@ -386,7 +386,7 @@ static void irlap_adjust_qos_settings(struct qos_info *qos)
386 while ((qos->data_size.value > line_capacity) && (index > 0)) { 386 while ((qos->data_size.value > line_capacity) && (index > 0)) {
387 qos->data_size.value = data_sizes[index--]; 387 qos->data_size.value = data_sizes[index--];
388 IRDA_DEBUG(2, "%s(), reducing data size to %d\n", 388 IRDA_DEBUG(2, "%s(), reducing data size to %d\n",
389 __FUNCTION__, qos->data_size.value); 389 __func__, qos->data_size.value);
390 } 390 }
391#else /* Use method described in section 6.6.11 of IrLAP */ 391#else /* Use method described in section 6.6.11 of IrLAP */
392 while (irlap_requested_line_capacity(qos) > line_capacity) { 392 while (irlap_requested_line_capacity(qos) > line_capacity) {
@@ -396,14 +396,14 @@ static void irlap_adjust_qos_settings(struct qos_info *qos)
396 if (qos->window_size.value > 1) { 396 if (qos->window_size.value > 1) {
397 qos->window_size.value--; 397 qos->window_size.value--;
398 IRDA_DEBUG(2, "%s(), reducing window size to %d\n", 398 IRDA_DEBUG(2, "%s(), reducing window size to %d\n",
399 __FUNCTION__, qos->window_size.value); 399 __func__, qos->window_size.value);
400 } else if (index > 1) { 400 } else if (index > 1) {
401 qos->data_size.value = data_sizes[index--]; 401 qos->data_size.value = data_sizes[index--];
402 IRDA_DEBUG(2, "%s(), reducing data size to %d\n", 402 IRDA_DEBUG(2, "%s(), reducing data size to %d\n",
403 __FUNCTION__, qos->data_size.value); 403 __func__, qos->data_size.value);
404 } else { 404 } else {
405 IRDA_WARNING("%s(), nothing more we can do!\n", 405 IRDA_WARNING("%s(), nothing more we can do!\n",
406 __FUNCTION__); 406 __func__);
407 } 407 }
408 } 408 }
409#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */ 409#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
@@ -538,7 +538,7 @@ static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get)
538 if (get) { 538 if (get) {
539 param->pv.i = self->qos_rx.baud_rate.bits; 539 param->pv.i = self->qos_rx.baud_rate.bits;
540 IRDA_DEBUG(2, "%s(), baud rate = 0x%02x\n", 540 IRDA_DEBUG(2, "%s(), baud rate = 0x%02x\n",
541 __FUNCTION__, param->pv.i); 541 __func__, param->pv.i);
542 } else { 542 } else {
543 /* 543 /*
544 * Stations must agree on baud rate, so calculate 544 * Stations must agree on baud rate, so calculate
@@ -711,7 +711,7 @@ __u32 irlap_max_line_capacity(__u32 speed, __u32 max_turn_time)
711 int i,j; 711 int i,j;
712 712
713 IRDA_DEBUG(2, "%s(), speed=%d, max_turn_time=%d\n", 713 IRDA_DEBUG(2, "%s(), speed=%d, max_turn_time=%d\n",
714 __FUNCTION__, speed, max_turn_time); 714 __func__, speed, max_turn_time);
715 715
716 i = value_index(speed, baud_rates, 10); 716 i = value_index(speed, baud_rates, 10);
717 j = value_index(max_turn_time, max_turn_times, 4); 717 j = value_index(max_turn_time, max_turn_times, 4);
@@ -722,7 +722,7 @@ __u32 irlap_max_line_capacity(__u32 speed, __u32 max_turn_time)
722 line_capacity = max_line_capacities[i][j]; 722 line_capacity = max_line_capacities[i][j];
723 723
724 IRDA_DEBUG(2, "%s(), line capacity=%d bytes\n", 724 IRDA_DEBUG(2, "%s(), line capacity=%d bytes\n",
725 __FUNCTION__, line_capacity); 725 __func__, line_capacity);
726 726
727 return line_capacity; 727 return line_capacity;
728} 728}
@@ -738,7 +738,7 @@ static __u32 irlap_requested_line_capacity(struct qos_info *qos)
738 qos->min_turn_time.value); 738 qos->min_turn_time.value);
739 739
740 IRDA_DEBUG(2, "%s(), requested line capacity=%d\n", 740 IRDA_DEBUG(2, "%s(), requested line capacity=%d\n",
741 __FUNCTION__, line_capacity); 741 __func__, line_capacity);
742 742
743 return line_capacity; 743 return line_capacity;
744} 744}
diff --git a/net/irda/wrapper.c b/net/irda/wrapper.c
index c246983308b8..fd0995b1323a 100644
--- a/net/irda/wrapper.c
+++ b/net/irda/wrapper.c
@@ -106,16 +106,16 @@ int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize)
106 * Nothing to worry about, but we set the default number of 106 * Nothing to worry about, but we set the default number of
107 * BOF's 107 * BOF's
108 */ 108 */
109 IRDA_DEBUG(1, "%s(), wrong magic in skb!\n", __FUNCTION__); 109 IRDA_DEBUG(1, "%s(), wrong magic in skb!\n", __func__);
110 xbofs = 10; 110 xbofs = 10;
111 } else 111 } else
112 xbofs = cb->xbofs + cb->xbofs_delay; 112 xbofs = cb->xbofs + cb->xbofs_delay;
113 113
114 IRDA_DEBUG(4, "%s(), xbofs=%d\n", __FUNCTION__, xbofs); 114 IRDA_DEBUG(4, "%s(), xbofs=%d\n", __func__, xbofs);
115 115
116 /* Check that we never use more than 115 + 48 xbofs */ 116 /* Check that we never use more than 115 + 48 xbofs */
117 if (xbofs > 163) { 117 if (xbofs > 163) {
118 IRDA_DEBUG(0, "%s(), too many xbofs (%d)\n", __FUNCTION__, 118 IRDA_DEBUG(0, "%s(), too many xbofs (%d)\n", __func__,
119 xbofs); 119 xbofs);
120 xbofs = 163; 120 xbofs = 163;
121 } 121 }
@@ -135,7 +135,7 @@ int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize)
135 */ 135 */
136 if(n >= (buffsize-5)) { 136 if(n >= (buffsize-5)) {
137 IRDA_ERROR("%s(), tx buffer overflow (n=%d)\n", 137 IRDA_ERROR("%s(), tx buffer overflow (n=%d)\n",
138 __FUNCTION__, n); 138 __func__, n);
139 return n; 139 return n;
140 } 140 }
141 141
@@ -287,7 +287,7 @@ async_unwrap_bof(struct net_device *dev,
287 /* Not supposed to happen, the previous frame is not 287 /* Not supposed to happen, the previous frame is not
288 * finished - Jean II */ 288 * finished - Jean II */
289 IRDA_DEBUG(1, "%s(), Discarding incomplete frame\n", 289 IRDA_DEBUG(1, "%s(), Discarding incomplete frame\n",
290 __FUNCTION__); 290 __func__);
291 stats->rx_errors++; 291 stats->rx_errors++;
292 stats->rx_missed_errors++; 292 stats->rx_missed_errors++;
293 irda_device_set_media_busy(dev, TRUE); 293 irda_device_set_media_busy(dev, TRUE);
@@ -360,7 +360,7 @@ async_unwrap_eof(struct net_device *dev,
360 /* Wrong CRC, discard frame! */ 360 /* Wrong CRC, discard frame! */
361 irda_device_set_media_busy(dev, TRUE); 361 irda_device_set_media_busy(dev, TRUE);
362 362
363 IRDA_DEBUG(1, "%s(), crc error\n", __FUNCTION__); 363 IRDA_DEBUG(1, "%s(), crc error\n", __func__);
364 stats->rx_errors++; 364 stats->rx_errors++;
365 stats->rx_crc_errors++; 365 stats->rx_crc_errors++;
366 } 366 }
@@ -386,7 +386,7 @@ async_unwrap_ce(struct net_device *dev,
386 break; 386 break;
387 387
388 case LINK_ESCAPE: 388 case LINK_ESCAPE:
389 IRDA_WARNING("%s: state not defined\n", __FUNCTION__); 389 IRDA_WARNING("%s: state not defined\n", __func__);
390 break; 390 break;
391 391
392 case BEGIN_FRAME: 392 case BEGIN_FRAME:
@@ -421,7 +421,7 @@ async_unwrap_other(struct net_device *dev,
421#endif 421#endif
422 } else { 422 } else {
423 IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n", 423 IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n",
424 __FUNCTION__); 424 __func__);
425 rx_buff->state = OUTSIDE_FRAME; 425 rx_buff->state = OUTSIDE_FRAME;
426 } 426 }
427 break; 427 break;
@@ -440,7 +440,7 @@ async_unwrap_other(struct net_device *dev,
440 rx_buff->state = INSIDE_FRAME; 440 rx_buff->state = INSIDE_FRAME;
441 } else { 441 } else {
442 IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n", 442 IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n",
443 __FUNCTION__); 443 __func__);
444 rx_buff->state = OUTSIDE_FRAME; 444 rx_buff->state = OUTSIDE_FRAME;
445 } 445 }
446 break; 446 break;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index fee22caf1bad..7b0038f45b16 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -53,7 +53,7 @@ static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
53static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]); 53static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
54 54
55static struct iucv_sock_list iucv_sk_list = { 55static struct iucv_sock_list iucv_sk_list = {
56 .lock = RW_LOCK_UNLOCKED, 56 .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
57 .autobind_name = ATOMIC_INIT(0) 57 .autobind_name = ATOMIC_INIT(0)
58}; 58};
59 59
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index d764f4c1b7e4..918970762131 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -795,7 +795,6 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
795 union iucv_param *parm; 795 union iucv_param *parm;
796 int rc; 796 int rc;
797 797
798 BUG_ON(in_atomic());
799 spin_lock_bh(&iucv_table_lock); 798 spin_lock_bh(&iucv_table_lock);
800 iucv_cleanup_queue(); 799 iucv_cleanup_queue();
801 parm = iucv_param[smp_processor_id()]; 800 parm = iucv_param[smp_processor_id()];
@@ -1609,13 +1608,10 @@ static int __init iucv_init(void)
1609 rc = register_external_interrupt(0x4000, iucv_external_interrupt); 1608 rc = register_external_interrupt(0x4000, iucv_external_interrupt);
1610 if (rc) 1609 if (rc)
1611 goto out; 1610 goto out;
1612 rc = bus_register(&iucv_bus);
1613 if (rc)
1614 goto out_int;
1615 iucv_root = s390_root_dev_register("iucv"); 1611 iucv_root = s390_root_dev_register("iucv");
1616 if (IS_ERR(iucv_root)) { 1612 if (IS_ERR(iucv_root)) {
1617 rc = PTR_ERR(iucv_root); 1613 rc = PTR_ERR(iucv_root);
1618 goto out_bus; 1614 goto out_int;
1619 } 1615 }
1620 1616
1621 for_each_online_cpu(cpu) { 1617 for_each_online_cpu(cpu) {
@@ -1635,13 +1631,20 @@ static int __init iucv_init(void)
1635 goto out_free; 1631 goto out_free;
1636 } 1632 }
1637 } 1633 }
1638 register_hotcpu_notifier(&iucv_cpu_notifier); 1634 rc = register_hotcpu_notifier(&iucv_cpu_notifier);
1635 if (rc)
1636 goto out_free;
1639 ASCEBC(iucv_error_no_listener, 16); 1637 ASCEBC(iucv_error_no_listener, 16);
1640 ASCEBC(iucv_error_no_memory, 16); 1638 ASCEBC(iucv_error_no_memory, 16);
1641 ASCEBC(iucv_error_pathid, 16); 1639 ASCEBC(iucv_error_pathid, 16);
1642 iucv_available = 1; 1640 iucv_available = 1;
1641 rc = bus_register(&iucv_bus);
1642 if (rc)
1643 goto out_cpu;
1643 return 0; 1644 return 0;
1644 1645
1646out_cpu:
1647 unregister_hotcpu_notifier(&iucv_cpu_notifier);
1645out_free: 1648out_free:
1646 for_each_possible_cpu(cpu) { 1649 for_each_possible_cpu(cpu) {
1647 kfree(iucv_param[cpu]); 1650 kfree(iucv_param[cpu]);
@@ -1650,8 +1653,6 @@ out_free:
1650 iucv_irq_data[cpu] = NULL; 1653 iucv_irq_data[cpu] = NULL;
1651 } 1654 }
1652 s390_root_dev_unregister(iucv_root); 1655 s390_root_dev_unregister(iucv_root);
1653out_bus:
1654 bus_unregister(&iucv_bus);
1655out_int: 1656out_int:
1656 unregister_external_interrupt(0x4000, iucv_external_interrupt); 1657 unregister_external_interrupt(0x4000, iucv_external_interrupt);
1657out: 1658out:
diff --git a/net/key/af_key.c b/net/key/af_key.c
index e9ef9af4a53b..1fb0fe42a72e 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -48,6 +48,17 @@ struct pfkey_sock {
48 struct sock sk; 48 struct sock sk;
49 int registered; 49 int registered;
50 int promisc; 50 int promisc;
51
52 struct {
53 uint8_t msg_version;
54 uint32_t msg_pid;
55 int (*dump)(struct pfkey_sock *sk);
56 void (*done)(struct pfkey_sock *sk);
57 union {
58 struct xfrm_policy_walk policy;
59 struct xfrm_state_walk state;
60 } u;
61 } dump;
51}; 62};
52 63
53static inline struct pfkey_sock *pfkey_sk(struct sock *sk) 64static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
@@ -55,6 +66,27 @@ static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
55 return (struct pfkey_sock *)sk; 66 return (struct pfkey_sock *)sk;
56} 67}
57 68
69static int pfkey_can_dump(struct sock *sk)
70{
71 if (3 * atomic_read(&sk->sk_rmem_alloc) <= 2 * sk->sk_rcvbuf)
72 return 1;
73 return 0;
74}
75
76static int pfkey_do_dump(struct pfkey_sock *pfk)
77{
78 int rc;
79
80 rc = pfk->dump.dump(pfk);
81 if (rc == -ENOBUFS)
82 return 0;
83
84 pfk->dump.done(pfk);
85 pfk->dump.dump = NULL;
86 pfk->dump.done = NULL;
87 return rc;
88}
89
58static void pfkey_sock_destruct(struct sock *sk) 90static void pfkey_sock_destruct(struct sock *sk)
59{ 91{
60 skb_queue_purge(&sk->sk_receive_queue); 92 skb_queue_purge(&sk->sk_receive_queue);
@@ -1709,45 +1741,60 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hd
1709 return 0; 1741 return 0;
1710} 1742}
1711 1743
1712struct pfkey_dump_data
1713{
1714 struct sk_buff *skb;
1715 struct sadb_msg *hdr;
1716 struct sock *sk;
1717};
1718
1719static int dump_sa(struct xfrm_state *x, int count, void *ptr) 1744static int dump_sa(struct xfrm_state *x, int count, void *ptr)
1720{ 1745{
1721 struct pfkey_dump_data *data = ptr; 1746 struct pfkey_sock *pfk = ptr;
1722 struct sk_buff *out_skb; 1747 struct sk_buff *out_skb;
1723 struct sadb_msg *out_hdr; 1748 struct sadb_msg *out_hdr;
1724 1749
1750 if (!pfkey_can_dump(&pfk->sk))
1751 return -ENOBUFS;
1752
1725 out_skb = pfkey_xfrm_state2msg(x); 1753 out_skb = pfkey_xfrm_state2msg(x);
1726 if (IS_ERR(out_skb)) 1754 if (IS_ERR(out_skb))
1727 return PTR_ERR(out_skb); 1755 return PTR_ERR(out_skb);
1728 1756
1729 out_hdr = (struct sadb_msg *) out_skb->data; 1757 out_hdr = (struct sadb_msg *) out_skb->data;
1730 out_hdr->sadb_msg_version = data->hdr->sadb_msg_version; 1758 out_hdr->sadb_msg_version = pfk->dump.msg_version;
1731 out_hdr->sadb_msg_type = SADB_DUMP; 1759 out_hdr->sadb_msg_type = SADB_DUMP;
1732 out_hdr->sadb_msg_satype = pfkey_proto2satype(x->id.proto); 1760 out_hdr->sadb_msg_satype = pfkey_proto2satype(x->id.proto);
1733 out_hdr->sadb_msg_errno = 0; 1761 out_hdr->sadb_msg_errno = 0;
1734 out_hdr->sadb_msg_reserved = 0; 1762 out_hdr->sadb_msg_reserved = 0;
1735 out_hdr->sadb_msg_seq = count; 1763 out_hdr->sadb_msg_seq = count;
1736 out_hdr->sadb_msg_pid = data->hdr->sadb_msg_pid; 1764 out_hdr->sadb_msg_pid = pfk->dump.msg_pid;
1737 pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, data->sk); 1765 pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, &pfk->sk);
1738 return 0; 1766 return 0;
1739} 1767}
1740 1768
1769static int pfkey_dump_sa(struct pfkey_sock *pfk)
1770{
1771 return xfrm_state_walk(&pfk->dump.u.state, dump_sa, (void *) pfk);
1772}
1773
1774static void pfkey_dump_sa_done(struct pfkey_sock *pfk)
1775{
1776 xfrm_state_walk_done(&pfk->dump.u.state);
1777}
1778
1741static int pfkey_dump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) 1779static int pfkey_dump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
1742{ 1780{
1743 u8 proto; 1781 u8 proto;
1744 struct pfkey_dump_data data = { .skb = skb, .hdr = hdr, .sk = sk }; 1782 struct pfkey_sock *pfk = pfkey_sk(sk);
1783
1784 if (pfk->dump.dump != NULL)
1785 return -EBUSY;
1745 1786
1746 proto = pfkey_satype2proto(hdr->sadb_msg_satype); 1787 proto = pfkey_satype2proto(hdr->sadb_msg_satype);
1747 if (proto == 0) 1788 if (proto == 0)
1748 return -EINVAL; 1789 return -EINVAL;
1749 1790
1750 return xfrm_state_walk(proto, dump_sa, &data); 1791 pfk->dump.msg_version = hdr->sadb_msg_version;
1792 pfk->dump.msg_pid = hdr->sadb_msg_pid;
1793 pfk->dump.dump = pfkey_dump_sa;
1794 pfk->dump.done = pfkey_dump_sa_done;
1795 xfrm_state_walk_init(&pfk->dump.u.state, proto);
1796
1797 return pfkey_do_dump(pfk);
1751} 1798}
1752 1799
1753static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) 1800static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
@@ -1780,7 +1827,9 @@ static int check_reqid(struct xfrm_policy *xp, int dir, int count, void *ptr)
1780 1827
1781static u32 gen_reqid(void) 1828static u32 gen_reqid(void)
1782{ 1829{
1830 struct xfrm_policy_walk walk;
1783 u32 start; 1831 u32 start;
1832 int rc;
1784 static u32 reqid = IPSEC_MANUAL_REQID_MAX; 1833 static u32 reqid = IPSEC_MANUAL_REQID_MAX;
1785 1834
1786 start = reqid; 1835 start = reqid;
@@ -1788,8 +1837,10 @@ static u32 gen_reqid(void)
1788 ++reqid; 1837 ++reqid;
1789 if (reqid == 0) 1838 if (reqid == 0)
1790 reqid = IPSEC_MANUAL_REQID_MAX+1; 1839 reqid = IPSEC_MANUAL_REQID_MAX+1;
1791 if (xfrm_policy_walk(XFRM_POLICY_TYPE_MAIN, check_reqid, 1840 xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_MAIN);
1792 (void*)&reqid) != -EEXIST) 1841 rc = xfrm_policy_walk(&walk, check_reqid, (void*)&reqid);
1842 xfrm_policy_walk_done(&walk);
1843 if (rc != -EEXIST)
1793 return reqid; 1844 return reqid;
1794 } while (reqid != start); 1845 } while (reqid != start);
1795 return 0; 1846 return 0;
@@ -2241,7 +2292,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
2241 goto out; 2292 goto out;
2242 } 2293 }
2243 2294
2244 err = security_xfrm_policy_alloc(xp, uctx); 2295 err = security_xfrm_policy_alloc(&xp->security, uctx);
2245 kfree(uctx); 2296 kfree(uctx);
2246 2297
2247 if (err) 2298 if (err)
@@ -2301,10 +2352,11 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
2301 int err; 2352 int err;
2302 struct sadb_address *sa; 2353 struct sadb_address *sa;
2303 struct sadb_x_policy *pol; 2354 struct sadb_x_policy *pol;
2304 struct xfrm_policy *xp, tmp; 2355 struct xfrm_policy *xp;
2305 struct xfrm_selector sel; 2356 struct xfrm_selector sel;
2306 struct km_event c; 2357 struct km_event c;
2307 struct sadb_x_sec_ctx *sec_ctx; 2358 struct sadb_x_sec_ctx *sec_ctx;
2359 struct xfrm_sec_ctx *pol_ctx;
2308 2360
2309 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1], 2361 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
2310 ext_hdrs[SADB_EXT_ADDRESS_DST-1]) || 2362 ext_hdrs[SADB_EXT_ADDRESS_DST-1]) ||
@@ -2334,25 +2386,23 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
2334 sel.dport_mask = htons(0xffff); 2386 sel.dport_mask = htons(0xffff);
2335 2387
2336 sec_ctx = (struct sadb_x_sec_ctx *) ext_hdrs[SADB_X_EXT_SEC_CTX-1]; 2388 sec_ctx = (struct sadb_x_sec_ctx *) ext_hdrs[SADB_X_EXT_SEC_CTX-1];
2337 memset(&tmp, 0, sizeof(struct xfrm_policy));
2338
2339 if (sec_ctx != NULL) { 2389 if (sec_ctx != NULL) {
2340 struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx); 2390 struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
2341 2391
2342 if (!uctx) 2392 if (!uctx)
2343 return -ENOMEM; 2393 return -ENOMEM;
2344 2394
2345 err = security_xfrm_policy_alloc(&tmp, uctx); 2395 err = security_xfrm_policy_alloc(&pol_ctx, uctx);
2346 kfree(uctx); 2396 kfree(uctx);
2347
2348 if (err) 2397 if (err)
2349 return err; 2398 return err;
2350 } 2399 } else
2351 2400 pol_ctx = NULL;
2352 xp = xfrm_policy_bysel_ctx(XFRM_POLICY_TYPE_MAIN, pol->sadb_x_policy_dir-1,
2353 &sel, tmp.security, 1, &err);
2354 security_xfrm_policy_free(&tmp);
2355 2401
2402 xp = xfrm_policy_bysel_ctx(XFRM_POLICY_TYPE_MAIN,
2403 pol->sadb_x_policy_dir - 1, &sel, pol_ctx,
2404 1, &err);
2405 security_xfrm_policy_free(pol_ctx);
2356 if (xp == NULL) 2406 if (xp == NULL)
2357 return -ENOENT; 2407 return -ENOENT;
2358 2408
@@ -2638,11 +2688,14 @@ out:
2638 2688
2639static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr) 2689static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
2640{ 2690{
2641 struct pfkey_dump_data *data = ptr; 2691 struct pfkey_sock *pfk = ptr;
2642 struct sk_buff *out_skb; 2692 struct sk_buff *out_skb;
2643 struct sadb_msg *out_hdr; 2693 struct sadb_msg *out_hdr;
2644 int err; 2694 int err;
2645 2695
2696 if (!pfkey_can_dump(&pfk->sk))
2697 return -ENOBUFS;
2698
2646 out_skb = pfkey_xfrm_policy2msg_prep(xp); 2699 out_skb = pfkey_xfrm_policy2msg_prep(xp);
2647 if (IS_ERR(out_skb)) 2700 if (IS_ERR(out_skb))
2648 return PTR_ERR(out_skb); 2701 return PTR_ERR(out_skb);
@@ -2652,21 +2705,40 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
2652 return err; 2705 return err;
2653 2706
2654 out_hdr = (struct sadb_msg *) out_skb->data; 2707 out_hdr = (struct sadb_msg *) out_skb->data;
2655 out_hdr->sadb_msg_version = data->hdr->sadb_msg_version; 2708 out_hdr->sadb_msg_version = pfk->dump.msg_version;
2656 out_hdr->sadb_msg_type = SADB_X_SPDDUMP; 2709 out_hdr->sadb_msg_type = SADB_X_SPDDUMP;
2657 out_hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; 2710 out_hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
2658 out_hdr->sadb_msg_errno = 0; 2711 out_hdr->sadb_msg_errno = 0;
2659 out_hdr->sadb_msg_seq = count; 2712 out_hdr->sadb_msg_seq = count;
2660 out_hdr->sadb_msg_pid = data->hdr->sadb_msg_pid; 2713 out_hdr->sadb_msg_pid = pfk->dump.msg_pid;
2661 pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, data->sk); 2714 pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, &pfk->sk);
2662 return 0; 2715 return 0;
2663} 2716}
2664 2717
2718static int pfkey_dump_sp(struct pfkey_sock *pfk)
2719{
2720 return xfrm_policy_walk(&pfk->dump.u.policy, dump_sp, (void *) pfk);
2721}
2722
2723static void pfkey_dump_sp_done(struct pfkey_sock *pfk)
2724{
2725 xfrm_policy_walk_done(&pfk->dump.u.policy);
2726}
2727
2665static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) 2728static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
2666{ 2729{
2667 struct pfkey_dump_data data = { .skb = skb, .hdr = hdr, .sk = sk }; 2730 struct pfkey_sock *pfk = pfkey_sk(sk);
2731
2732 if (pfk->dump.dump != NULL)
2733 return -EBUSY;
2734
2735 pfk->dump.msg_version = hdr->sadb_msg_version;
2736 pfk->dump.msg_pid = hdr->sadb_msg_pid;
2737 pfk->dump.dump = pfkey_dump_sp;
2738 pfk->dump.done = pfkey_dump_sp_done;
2739 xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
2668 2740
2669 return xfrm_policy_walk(XFRM_POLICY_TYPE_MAIN, dump_sp, &data); 2741 return pfkey_do_dump(pfk);
2670} 2742}
2671 2743
2672static int key_notify_policy_flush(struct km_event *c) 2744static int key_notify_policy_flush(struct km_event *c)
@@ -3225,7 +3297,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
3225 if ((*dir = verify_sec_ctx_len(p))) 3297 if ((*dir = verify_sec_ctx_len(p)))
3226 goto out; 3298 goto out;
3227 uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx); 3299 uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
3228 *dir = security_xfrm_policy_alloc(xp, uctx); 3300 *dir = security_xfrm_policy_alloc(&xp->security, uctx);
3229 kfree(uctx); 3301 kfree(uctx);
3230 3302
3231 if (*dir) 3303 if (*dir)
@@ -3671,6 +3743,7 @@ static int pfkey_recvmsg(struct kiocb *kiocb,
3671 int flags) 3743 int flags)
3672{ 3744{
3673 struct sock *sk = sock->sk; 3745 struct sock *sk = sock->sk;
3746 struct pfkey_sock *pfk = pfkey_sk(sk);
3674 struct sk_buff *skb; 3747 struct sk_buff *skb;
3675 int copied, err; 3748 int copied, err;
3676 3749
@@ -3698,6 +3771,10 @@ static int pfkey_recvmsg(struct kiocb *kiocb,
3698 3771
3699 err = (flags & MSG_TRUNC) ? skb->len : copied; 3772 err = (flags & MSG_TRUNC) ? skb->len : copied;
3700 3773
3774 if (pfk->dump.dump != NULL &&
3775 3 * atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
3776 pfkey_do_dump(pfk);
3777
3701out_free: 3778out_free:
3702 skb_free_datagram(sk, skb); 3779 skb_free_datagram(sk, skb);
3703out: 3780out:
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 8c50eb430c19..97101dcde4c0 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -188,7 +188,7 @@ static int llc_ui_release(struct socket *sock)
188 sock_hold(sk); 188 sock_hold(sk);
189 lock_sock(sk); 189 lock_sock(sk);
190 llc = llc_sk(sk); 190 llc = llc_sk(sk);
191 dprintk("%s: closing local(%02X) remote(%02X)\n", __FUNCTION__, 191 dprintk("%s: closing local(%02X) remote(%02X)\n", __func__,
192 llc->laddr.lsap, llc->daddr.lsap); 192 llc->laddr.lsap, llc->daddr.lsap);
193 if (!llc_send_disc(sk)) 193 if (!llc_send_disc(sk))
194 llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); 194 llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
@@ -298,7 +298,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
298 struct llc_sap *sap; 298 struct llc_sap *sap;
299 int rc = -EINVAL; 299 int rc = -EINVAL;
300 300
301 dprintk("%s: binding %02X\n", __FUNCTION__, addr->sllc_sap); 301 dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
302 if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) 302 if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
303 goto out; 303 goto out;
304 rc = -EAFNOSUPPORT; 304 rc = -EAFNOSUPPORT;
@@ -435,7 +435,7 @@ static int llc_ui_connect(struct socket *sock, struct sockaddr *uaddr,
435 rc = llc_establish_connection(sk, llc->dev->dev_addr, 435 rc = llc_establish_connection(sk, llc->dev->dev_addr,
436 addr->sllc_mac, addr->sllc_sap); 436 addr->sllc_mac, addr->sllc_sap);
437 if (rc) { 437 if (rc) {
438 dprintk("%s: llc_ui_send_conn failed :-(\n", __FUNCTION__); 438 dprintk("%s: llc_ui_send_conn failed :-(\n", __func__);
439 sock->state = SS_UNCONNECTED; 439 sock->state = SS_UNCONNECTED;
440 sk->sk_state = TCP_CLOSE; 440 sk->sk_state = TCP_CLOSE;
441 goto out; 441 goto out;
@@ -607,7 +607,7 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags)
607 struct sk_buff *skb; 607 struct sk_buff *skb;
608 int rc = -EOPNOTSUPP; 608 int rc = -EOPNOTSUPP;
609 609
610 dprintk("%s: accepting on %02X\n", __FUNCTION__, 610 dprintk("%s: accepting on %02X\n", __func__,
611 llc_sk(sk)->laddr.lsap); 611 llc_sk(sk)->laddr.lsap);
612 lock_sock(sk); 612 lock_sock(sk);
613 if (unlikely(sk->sk_type != SOCK_STREAM)) 613 if (unlikely(sk->sk_type != SOCK_STREAM))
@@ -622,7 +622,7 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags)
622 if (rc) 622 if (rc)
623 goto out; 623 goto out;
624 } 624 }
625 dprintk("%s: got a new connection on %02X\n", __FUNCTION__, 625 dprintk("%s: got a new connection on %02X\n", __func__,
626 llc_sk(sk)->laddr.lsap); 626 llc_sk(sk)->laddr.lsap);
627 skb = skb_dequeue(&sk->sk_receive_queue); 627 skb = skb_dequeue(&sk->sk_receive_queue);
628 rc = -EINVAL; 628 rc = -EINVAL;
@@ -643,7 +643,7 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags)
643 /* put original socket back into a clean listen state. */ 643 /* put original socket back into a clean listen state. */
644 sk->sk_state = TCP_LISTEN; 644 sk->sk_state = TCP_LISTEN;
645 sk->sk_ack_backlog--; 645 sk->sk_ack_backlog--;
646 dprintk("%s: ok success on %02X, client on %02X\n", __FUNCTION__, 646 dprintk("%s: ok success on %02X, client on %02X\n", __func__,
647 llc_sk(sk)->addr.sllc_sap, newllc->daddr.lsap); 647 llc_sk(sk)->addr.sllc_sap, newllc->daddr.lsap);
648frees: 648frees:
649 kfree_skb(skb); 649 kfree_skb(skb);
@@ -836,7 +836,7 @@ static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock,
836 size_t size = 0; 836 size_t size = 0;
837 int rc = -EINVAL, copied = 0, hdrlen; 837 int rc = -EINVAL, copied = 0, hdrlen;
838 838
839 dprintk("%s: sending from %02X to %02X\n", __FUNCTION__, 839 dprintk("%s: sending from %02X to %02X\n", __func__,
840 llc->laddr.lsap, llc->daddr.lsap); 840 llc->laddr.lsap, llc->daddr.lsap);
841 lock_sock(sk); 841 lock_sock(sk);
842 if (addr) { 842 if (addr) {
@@ -894,7 +894,7 @@ out:
894 kfree_skb(skb); 894 kfree_skb(skb);
895release: 895release:
896 dprintk("%s: failed sending from %02X to %02X: %d\n", 896 dprintk("%s: failed sending from %02X to %02X: %d\n",
897 __FUNCTION__, llc->laddr.lsap, llc->daddr.lsap, rc); 897 __func__, llc->laddr.lsap, llc->daddr.lsap, rc);
898 } 898 }
899 release_sock(sk); 899 release_sock(sk);
900 return rc ? : copied; 900 return rc ? : copied;
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index 71a00225bdb3..019c780512e8 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -1430,7 +1430,7 @@ static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb)
1430{ 1430{
1431 if (llc_sk(sk)->state == LLC_CONN_OUT_OF_SVC) { 1431 if (llc_sk(sk)->state == LLC_CONN_OUT_OF_SVC) {
1432 printk(KERN_WARNING "%s: timer called on closed connection\n", 1432 printk(KERN_WARNING "%s: timer called on closed connection\n",
1433 __FUNCTION__); 1433 __func__);
1434 kfree_skb(skb); 1434 kfree_skb(skb);
1435 } else { 1435 } else {
1436 if (!sock_owned_by_user(sk)) 1436 if (!sock_owned_by_user(sk))
diff --git a/net/llc/llc_c_ev.c b/net/llc/llc_c_ev.c
index c5deda246614..523fdd1cf781 100644
--- a/net/llc/llc_c_ev.c
+++ b/net/llc/llc_c_ev.c
@@ -228,7 +228,7 @@ int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk,
228 llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; 228 llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1;
229 if (!rc) 229 if (!rc)
230 dprintk("%s: matched, state=%d, ns=%d, vr=%d\n", 230 dprintk("%s: matched, state=%d, ns=%d, vr=%d\n",
231 __FUNCTION__, llc_sk(sk)->state, ns, vr); 231 __func__, llc_sk(sk)->state, ns, vr);
232 return rc; 232 return rc;
233} 233}
234 234
@@ -306,7 +306,7 @@ int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk,
306 llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; 306 llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1;
307 if (!rc) 307 if (!rc)
308 dprintk("%s: matched, state=%d, ns=%d, vr=%d\n", 308 dprintk("%s: matched, state=%d, ns=%d, vr=%d\n",
309 __FUNCTION__, llc_sk(sk)->state, ns, vr); 309 __func__, llc_sk(sk)->state, ns, vr);
310 return rc; 310 return rc;
311} 311}
312 312
@@ -511,7 +511,7 @@ int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk,
511 (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) && 511 (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) &&
512 nr != vs && llc_util_nr_inside_tx_window(sk, nr)) { 512 nr != vs && llc_util_nr_inside_tx_window(sk, nr)) {
513 dprintk("%s: matched, state=%d, vs=%d, nr=%d\n", 513 dprintk("%s: matched, state=%d, vs=%d, nr=%d\n",
514 __FUNCTION__, llc_sk(sk)->state, vs, nr); 514 __func__, llc_sk(sk)->state, vs, nr);
515 rc = 0; 515 rc = 0;
516 } 516 }
517 return rc; 517 return rc;
@@ -530,7 +530,7 @@ int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk,
530 nr != vs && llc_util_nr_inside_tx_window(sk, nr)) { 530 nr != vs && llc_util_nr_inside_tx_window(sk, nr)) {
531 rc = 0; 531 rc = 0;
532 dprintk("%s: matched, state=%d, vs=%d, nr=%d\n", 532 dprintk("%s: matched, state=%d, vs=%d, nr=%d\n",
533 __FUNCTION__, llc_sk(sk)->state, vs, nr); 533 __func__, llc_sk(sk)->state, vs, nr);
534 } 534 }
535 return rc; 535 return rc;
536} 536}
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 441bc18f996d..5c6d89c6d51d 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -73,7 +73,7 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
73 */ 73 */
74 rc = llc_conn_service(skb->sk, skb); 74 rc = llc_conn_service(skb->sk, skb);
75 if (unlikely(rc != 0)) { 75 if (unlikely(rc != 0)) {
76 printk(KERN_ERR "%s: llc_conn_service failed\n", __FUNCTION__); 76 printk(KERN_ERR "%s: llc_conn_service failed\n", __func__);
77 goto out_kfree_skb; 77 goto out_kfree_skb;
78 } 78 }
79 79
@@ -99,7 +99,7 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
99 * shouldn't happen 99 * shouldn't happen
100 */ 100 */
101 printk(KERN_ERR "%s: sock_queue_rcv_skb failed!\n", 101 printk(KERN_ERR "%s: sock_queue_rcv_skb failed!\n",
102 __FUNCTION__); 102 __func__);
103 kfree_skb(skb); 103 kfree_skb(skb);
104 } 104 }
105 break; 105 break;
@@ -132,13 +132,13 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
132 * FIXME: 132 * FIXME:
133 * RESET is not being notified to upper layers for now 133 * RESET is not being notified to upper layers for now
134 */ 134 */
135 printk(KERN_INFO "%s: received a reset ind!\n", __FUNCTION__); 135 printk(KERN_INFO "%s: received a reset ind!\n", __func__);
136 kfree_skb(skb); 136 kfree_skb(skb);
137 break; 137 break;
138 default: 138 default:
139 if (ev->ind_prim) { 139 if (ev->ind_prim) {
140 printk(KERN_INFO "%s: received unknown %d prim!\n", 140 printk(KERN_INFO "%s: received unknown %d prim!\n",
141 __FUNCTION__, ev->ind_prim); 141 __func__, ev->ind_prim);
142 kfree_skb(skb); 142 kfree_skb(skb);
143 } 143 }
144 /* No indication */ 144 /* No indication */
@@ -179,12 +179,12 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
179 * FIXME: 179 * FIXME:
180 * RESET is not being notified to upper layers for now 180 * RESET is not being notified to upper layers for now
181 */ 181 */
182 printk(KERN_INFO "%s: received a reset conf!\n", __FUNCTION__); 182 printk(KERN_INFO "%s: received a reset conf!\n", __func__);
183 break; 183 break;
184 default: 184 default:
185 if (ev->cfm_prim) { 185 if (ev->cfm_prim) {
186 printk(KERN_INFO "%s: received unknown %d prim!\n", 186 printk(KERN_INFO "%s: received unknown %d prim!\n",
187 __FUNCTION__, ev->cfm_prim); 187 __func__, ev->cfm_prim);
188 break; 188 break;
189 } 189 }
190 goto out_skb_put; /* No confirmation */ 190 goto out_skb_put; /* No confirmation */
@@ -700,7 +700,7 @@ static struct sock *llc_create_incoming_sock(struct sock *sk,
700 struct llc_addr *saddr, 700 struct llc_addr *saddr,
701 struct llc_addr *daddr) 701 struct llc_addr *daddr)
702{ 702{
703 struct sock *newsk = llc_sk_alloc(sk->sk_net, sk->sk_family, GFP_ATOMIC, 703 struct sock *newsk = llc_sk_alloc(sock_net(sk), sk->sk_family, GFP_ATOMIC,
704 sk->sk_prot); 704 sk->sk_prot);
705 struct llc_sock *newllc, *llc = llc_sk(sk); 705 struct llc_sock *newllc, *llc = llc_sk(sk);
706 706
@@ -759,7 +759,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
759 if (!sock_owned_by_user(sk)) 759 if (!sock_owned_by_user(sk))
760 llc_conn_rcv(sk, skb); 760 llc_conn_rcv(sk, skb);
761 else { 761 else {
762 dprintk("%s: adding to backlog...\n", __FUNCTION__); 762 dprintk("%s: adding to backlog...\n", __func__);
763 llc_set_backlog_type(skb, LLC_PACKET); 763 llc_set_backlog_type(skb, LLC_PACKET);
764 sk_add_backlog(sk, skb); 764 sk_add_backlog(sk, skb);
765 } 765 }
@@ -807,7 +807,7 @@ static int llc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
807 else 807 else
808 goto out_kfree_skb; 808 goto out_kfree_skb;
809 } else { 809 } else {
810 printk(KERN_ERR "%s: invalid skb in backlog\n", __FUNCTION__); 810 printk(KERN_ERR "%s: invalid skb in backlog\n", __func__);
811 goto out_kfree_skb; 811 goto out_kfree_skb;
812 } 812 }
813out: 813out:
@@ -874,7 +874,7 @@ struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct pr
874#ifdef LLC_REFCNT_DEBUG 874#ifdef LLC_REFCNT_DEBUG
875 atomic_inc(&llc_sock_nr); 875 atomic_inc(&llc_sock_nr);
876 printk(KERN_DEBUG "LLC socket %p created in %s, now we have %d alive\n", sk, 876 printk(KERN_DEBUG "LLC socket %p created in %s, now we have %d alive\n", sk,
877 __FUNCTION__, atomic_read(&llc_sock_nr)); 877 __func__, atomic_read(&llc_sock_nr));
878#endif 878#endif
879out: 879out:
880 return sk; 880 return sk;
@@ -894,7 +894,7 @@ void llc_sk_free(struct sock *sk)
894 /* Stop all (possibly) running timers */ 894 /* Stop all (possibly) running timers */
895 llc_conn_ac_stop_all_timers(sk, NULL); 895 llc_conn_ac_stop_all_timers(sk, NULL);
896#ifdef DEBUG_LLC_CONN_ALLOC 896#ifdef DEBUG_LLC_CONN_ALLOC
897 printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __FUNCTION__, 897 printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
898 skb_queue_len(&llc->pdu_unack_q), 898 skb_queue_len(&llc->pdu_unack_q),
899 skb_queue_len(&sk->sk_write_queue)); 899 skb_queue_len(&sk->sk_write_queue));
900#endif 900#endif
@@ -904,13 +904,13 @@ void llc_sk_free(struct sock *sk)
904#ifdef LLC_REFCNT_DEBUG 904#ifdef LLC_REFCNT_DEBUG
905 if (atomic_read(&sk->sk_refcnt) != 1) { 905 if (atomic_read(&sk->sk_refcnt) != 1) {
906 printk(KERN_DEBUG "Destruction of LLC sock %p delayed in %s, cnt=%d\n", 906 printk(KERN_DEBUG "Destruction of LLC sock %p delayed in %s, cnt=%d\n",
907 sk, __FUNCTION__, atomic_read(&sk->sk_refcnt)); 907 sk, __func__, atomic_read(&sk->sk_refcnt));
908 printk(KERN_DEBUG "%d LLC sockets are still alive\n", 908 printk(KERN_DEBUG "%d LLC sockets are still alive\n",
909 atomic_read(&llc_sock_nr)); 909 atomic_read(&llc_sock_nr));
910 } else { 910 } else {
911 atomic_dec(&llc_sock_nr); 911 atomic_dec(&llc_sock_nr);
912 printk(KERN_DEBUG "LLC socket %p released in %s, %d are still alive\n", sk, 912 printk(KERN_DEBUG "LLC socket %p released in %s, %d are still alive\n", sk,
913 __FUNCTION__, atomic_read(&llc_sock_nr)); 913 __func__, atomic_read(&llc_sock_nr));
914 } 914 }
915#endif 915#endif
916 sock_put(sk); 916 sock_put(sk);
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index bfd2567dd365..1c45f172991e 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -150,7 +150,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
150 int (*rcv)(struct sk_buff *, struct net_device *, 150 int (*rcv)(struct sk_buff *, struct net_device *,
151 struct packet_type *, struct net_device *); 151 struct packet_type *, struct net_device *);
152 152
153 if (dev->nd_net != &init_net) 153 if (dev_net(dev) != &init_net)
154 goto drop; 154 goto drop;
155 155
156 /* 156 /*
@@ -158,7 +158,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
158 * receives, do not try to analyse it. 158 * receives, do not try to analyse it.
159 */ 159 */
160 if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) { 160 if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
161 dprintk("%s: PACKET_OTHERHOST\n", __FUNCTION__); 161 dprintk("%s: PACKET_OTHERHOST\n", __func__);
162 goto drop; 162 goto drop;
163 } 163 }
164 skb = skb_share_check(skb, GFP_ATOMIC); 164 skb = skb_share_check(skb, GFP_ATOMIC);
@@ -171,7 +171,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
171 goto handle_station; 171 goto handle_station;
172 sap = llc_sap_find(pdu->dsap); 172 sap = llc_sap_find(pdu->dsap);
173 if (unlikely(!sap)) {/* unknown SAP */ 173 if (unlikely(!sap)) {/* unknown SAP */
174 dprintk("%s: llc_sap_find(%02X) failed!\n", __FUNCTION__, 174 dprintk("%s: llc_sap_find(%02X) failed!\n", __func__,
175 pdu->dsap); 175 pdu->dsap);
176 goto drop; 176 goto drop;
177 } 177 }
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 45c7c0c3875e..520a5180a4f6 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -32,15 +32,6 @@ config MAC80211_RC_DEFAULT_PID
32 default rate control algorithm. You should choose 32 default rate control algorithm. You should choose
33 this unless you know what you are doing. 33 this unless you know what you are doing.
34 34
35config MAC80211_RC_DEFAULT_SIMPLE
36 bool "Simple rate control algorithm"
37 select MAC80211_RC_SIMPLE
38 ---help---
39 Select the simple rate control as the default rate
40 control algorithm. Note that this is a non-responsive,
41 dumb algorithm. You should choose the PID rate control
42 instead.
43
44config MAC80211_RC_DEFAULT_NONE 35config MAC80211_RC_DEFAULT_NONE
45 bool "No default algorithm" 36 bool "No default algorithm"
46 depends on EMBEDDED 37 depends on EMBEDDED
@@ -57,7 +48,6 @@ comment "build the algorithm into mac80211."
57config MAC80211_RC_DEFAULT 48config MAC80211_RC_DEFAULT
58 string 49 string
59 default "pid" if MAC80211_RC_DEFAULT_PID 50 default "pid" if MAC80211_RC_DEFAULT_PID
60 default "simple" if MAC80211_RC_DEFAULT_SIMPLE
61 default "" 51 default ""
62 52
63config MAC80211_RC_PID 53config MAC80211_RC_PID
@@ -70,16 +60,16 @@ config MAC80211_RC_PID
70 Say Y or M unless you're sure you want to use a 60 Say Y or M unless you're sure you want to use a
71 different rate control algorithm. 61 different rate control algorithm.
72 62
73config MAC80211_RC_SIMPLE 63endmenu
74 tristate "Simple rate control algorithm (DEPRECATED)" 64
65config MAC80211_MESH
66 bool "Enable mac80211 mesh networking (pre-802.11s) support"
67 depends on MAC80211 && EXPERIMENTAL
75 ---help--- 68 ---help---
76 This option enables a very simple, non-responsive TX 69 This options enables support of Draft 802.11s mesh networking.
77 rate control algorithm. This algorithm is deprecated 70 The implementation is based on Draft 1.08 of the Mesh Networking
78 and will be removed from the kernel in the near future. 71 amendment. For more information visit http://o11s.org/.
79 It has been replaced by the PID algorithm.
80 72
81 Say N unless you know what you are doing.
82endmenu
83 73
84config MAC80211_LEDS 74config MAC80211_LEDS
85 bool "Enable LED triggers" 75 bool "Enable LED triggers"
@@ -166,3 +156,10 @@ config MAC80211_VERBOSE_PS_DEBUG
166 ---help--- 156 ---help---
167 Say Y here to print out verbose powersave 157 Say Y here to print out verbose powersave
168 mode debug messages. 158 mode debug messages.
159
160config MAC80211_VERBOSE_MPL_DEBUG
161 bool "Verbose mesh peer link debugging"
162 depends on MAC80211_DEBUG && MAC80211_MESH
163 ---help---
164 Say Y here to print out verbose mesh peer link
165 debug messages.
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 54f46bc80cfe..4e5847fd316c 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -10,16 +10,15 @@ rc-pid-m := rc80211_pid.o
10 10
11# mac80211 objects 11# mac80211 objects
12mac80211-y := \ 12mac80211-y := \
13 ieee80211.o \ 13 main.o \
14 ieee80211_ioctl.o \ 14 wext.o \
15 sta_info.o \ 15 sta_info.o \
16 wep.o \ 16 wep.o \
17 wpa.o \ 17 wpa.o \
18 ieee80211_sta.o \ 18 mlme.o \
19 ieee80211_iface.o \ 19 iface.o \
20 ieee80211_rate.o \ 20 rate.o \
21 michael.o \ 21 michael.o \
22 regdomain.o \
23 tkip.o \ 22 tkip.o \
24 aes_ccm.o \ 23 aes_ccm.o \
25 cfg.o \ 24 cfg.o \
@@ -29,7 +28,7 @@ mac80211-y := \
29 util.o \ 28 util.o \
30 event.o 29 event.o
31 30
32mac80211-$(CONFIG_MAC80211_LEDS) += ieee80211_led.o 31mac80211-$(CONFIG_MAC80211_LEDS) += led.o
33mac80211-$(CONFIG_NET_SCHED) += wme.o 32mac80211-$(CONFIG_NET_SCHED) += wme.o
34mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ 33mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
35 debugfs.o \ 34 debugfs.o \
@@ -37,11 +36,15 @@ mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
37 debugfs_netdev.o \ 36 debugfs_netdev.o \
38 debugfs_key.o 37 debugfs_key.o
39 38
39mac80211-$(CONFIG_MAC80211_MESH) += \
40 mesh.o \
41 mesh_pathtbl.o \
42 mesh_plink.o \
43 mesh_hwmp.o
44
40 45
41# Build rate control algorithm(s) 46# Build rate control algorithm(s)
42CFLAGS_rc80211_simple.o += -DRC80211_SIMPLE_COMPILE
43CFLAGS_rc80211_pid_algo.o += -DRC80211_PID_COMPILE 47CFLAGS_rc80211_pid_algo.o += -DRC80211_PID_COMPILE
44mac80211-$(CONFIG_MAC80211_RC_SIMPLE) += rc80211_simple.o
45mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc-pid-$(CONFIG_MAC80211_RC_PID)) 48mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc-pid-$(CONFIG_MAC80211_RC_PID))
46 49
47# Modular rate algorithms are assigned to mac80211-m - make separate modules 50# Modular rate algorithms are assigned to mac80211-m - make separate modules
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index e62fe55944b8..59f1691f62c8 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -13,7 +13,7 @@
13#include <linux/err.h> 13#include <linux/err.h>
14 14
15#include <net/mac80211.h> 15#include <net/mac80211.h>
16#include "ieee80211_key.h" 16#include "key.h"
17#include "aes_ccm.h" 17#include "aes_ccm.h"
18 18
19 19
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 22c9619ba776..699d97b8de5e 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -14,7 +14,8 @@
14#include <net/cfg80211.h> 14#include <net/cfg80211.h>
15#include "ieee80211_i.h" 15#include "ieee80211_i.h"
16#include "cfg.h" 16#include "cfg.h"
17#include "ieee80211_rate.h" 17#include "rate.h"
18#include "mesh.h"
18 19
19static enum ieee80211_if_types 20static enum ieee80211_if_types
20nl80211_type_to_mac80211_type(enum nl80211_iftype type) 21nl80211_type_to_mac80211_type(enum nl80211_iftype type)
@@ -28,16 +29,26 @@ nl80211_type_to_mac80211_type(enum nl80211_iftype type)
28 return IEEE80211_IF_TYPE_STA; 29 return IEEE80211_IF_TYPE_STA;
29 case NL80211_IFTYPE_MONITOR: 30 case NL80211_IFTYPE_MONITOR:
30 return IEEE80211_IF_TYPE_MNTR; 31 return IEEE80211_IF_TYPE_MNTR;
32#ifdef CONFIG_MAC80211_MESH
33 case NL80211_IFTYPE_MESH_POINT:
34 return IEEE80211_IF_TYPE_MESH_POINT;
35#endif
36 case NL80211_IFTYPE_WDS:
37 return IEEE80211_IF_TYPE_WDS;
31 default: 38 default:
32 return IEEE80211_IF_TYPE_INVALID; 39 return IEEE80211_IF_TYPE_INVALID;
33 } 40 }
34} 41}
35 42
36static int ieee80211_add_iface(struct wiphy *wiphy, char *name, 43static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
37 enum nl80211_iftype type) 44 enum nl80211_iftype type, u32 *flags,
45 struct vif_params *params)
38{ 46{
39 struct ieee80211_local *local = wiphy_priv(wiphy); 47 struct ieee80211_local *local = wiphy_priv(wiphy);
40 enum ieee80211_if_types itype; 48 enum ieee80211_if_types itype;
49 struct net_device *dev;
50 struct ieee80211_sub_if_data *sdata;
51 int err;
41 52
42 if (unlikely(local->reg_state != IEEE80211_DEV_REGISTERED)) 53 if (unlikely(local->reg_state != IEEE80211_DEV_REGISTERED))
43 return -ENODEV; 54 return -ENODEV;
@@ -46,7 +57,13 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
46 if (itype == IEEE80211_IF_TYPE_INVALID) 57 if (itype == IEEE80211_IF_TYPE_INVALID)
47 return -EINVAL; 58 return -EINVAL;
48 59
49 return ieee80211_if_add(local->mdev, name, NULL, itype); 60 err = ieee80211_if_add(local->mdev, name, &dev, itype, params);
61 if (err || itype != IEEE80211_IF_TYPE_MNTR || !flags)
62 return err;
63
64 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
65 sdata->u.mntr_flags = *flags;
66 return 0;
50} 67}
51 68
52static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex) 69static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex)
@@ -69,7 +86,8 @@ static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex)
69} 86}
70 87
71static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex, 88static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex,
72 enum nl80211_iftype type) 89 enum nl80211_iftype type, u32 *flags,
90 struct vif_params *params)
73{ 91{
74 struct ieee80211_local *local = wiphy_priv(wiphy); 92 struct ieee80211_local *local = wiphy_priv(wiphy);
75 struct net_device *dev; 93 struct net_device *dev;
@@ -99,6 +117,15 @@ static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex,
99 ieee80211_if_reinit(dev); 117 ieee80211_if_reinit(dev);
100 ieee80211_if_set_type(dev, itype); 118 ieee80211_if_set_type(dev, itype);
101 119
120 if (ieee80211_vif_is_mesh(&sdata->vif) && params->mesh_id_len)
121 ieee80211_if_sta_set_mesh_id(&sdata->u.sta,
122 params->mesh_id_len,
123 params->mesh_id);
124
125 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR || !flags)
126 return 0;
127
128 sdata->u.mntr_flags = *flags;
102 return 0; 129 return 0;
103} 130}
104 131
@@ -109,7 +136,8 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
109 struct ieee80211_sub_if_data *sdata; 136 struct ieee80211_sub_if_data *sdata;
110 struct sta_info *sta = NULL; 137 struct sta_info *sta = NULL;
111 enum ieee80211_key_alg alg; 138 enum ieee80211_key_alg alg;
112 int ret; 139 struct ieee80211_key *key;
140 int err;
113 141
114 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 142 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
115 143
@@ -128,21 +156,28 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
128 return -EINVAL; 156 return -EINVAL;
129 } 157 }
130 158
159 key = ieee80211_key_alloc(alg, key_idx, params->key_len, params->key);
160 if (!key)
161 return -ENOMEM;
162
163 rcu_read_lock();
164
131 if (mac_addr) { 165 if (mac_addr) {
132 sta = sta_info_get(sdata->local, mac_addr); 166 sta = sta_info_get(sdata->local, mac_addr);
133 if (!sta) 167 if (!sta) {
134 return -ENOENT; 168 ieee80211_key_free(key);
169 err = -ENOENT;
170 goto out_unlock;
171 }
135 } 172 }
136 173
137 ret = 0; 174 ieee80211_key_link(key, sdata, sta);
138 if (!ieee80211_key_alloc(sdata, sta, alg, key_idx,
139 params->key_len, params->key))
140 ret = -ENOMEM;
141 175
142 if (sta) 176 err = 0;
143 sta_info_put(sta); 177 out_unlock:
178 rcu_read_unlock();
144 179
145 return ret; 180 return err;
146} 181}
147 182
148static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, 183static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
@@ -154,27 +189,37 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
154 189
155 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 190 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
156 191
192 rcu_read_lock();
193
157 if (mac_addr) { 194 if (mac_addr) {
195 ret = -ENOENT;
196
158 sta = sta_info_get(sdata->local, mac_addr); 197 sta = sta_info_get(sdata->local, mac_addr);
159 if (!sta) 198 if (!sta)
160 return -ENOENT; 199 goto out_unlock;
161 200
162 ret = 0; 201 if (sta->key) {
163 if (sta->key)
164 ieee80211_key_free(sta->key); 202 ieee80211_key_free(sta->key);
165 else 203 WARN_ON(sta->key);
166 ret = -ENOENT; 204 ret = 0;
205 }
167 206
168 sta_info_put(sta); 207 goto out_unlock;
169 return ret;
170 } 208 }
171 209
172 if (!sdata->keys[key_idx]) 210 if (!sdata->keys[key_idx]) {
173 return -ENOENT; 211 ret = -ENOENT;
212 goto out_unlock;
213 }
174 214
175 ieee80211_key_free(sdata->keys[key_idx]); 215 ieee80211_key_free(sdata->keys[key_idx]);
216 WARN_ON(sdata->keys[key_idx]);
176 217
177 return 0; 218 ret = 0;
219 out_unlock:
220 rcu_read_unlock();
221
222 return ret;
178} 223}
179 224
180static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, 225static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
@@ -191,6 +236,8 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
191 u16 iv16; 236 u16 iv16;
192 int err = -ENOENT; 237 int err = -ENOENT;
193 238
239 rcu_read_lock();
240
194 if (mac_addr) { 241 if (mac_addr) {
195 sta = sta_info_get(sdata->local, mac_addr); 242 sta = sta_info_get(sdata->local, mac_addr);
196 if (!sta) 243 if (!sta)
@@ -254,8 +301,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
254 err = 0; 301 err = 0;
255 302
256 out: 303 out:
257 if (sta) 304 rcu_read_unlock();
258 sta_info_put(sta);
259 return err; 305 return err;
260} 306}
261 307
@@ -265,35 +311,83 @@ static int ieee80211_config_default_key(struct wiphy *wiphy,
265{ 311{
266 struct ieee80211_sub_if_data *sdata; 312 struct ieee80211_sub_if_data *sdata;
267 313
314 rcu_read_lock();
315
268 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 316 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
269 ieee80211_set_default_key(sdata, key_idx); 317 ieee80211_set_default_key(sdata, key_idx);
270 318
319 rcu_read_unlock();
320
271 return 0; 321 return 0;
272} 322}
273 323
324static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
325{
326 struct ieee80211_sub_if_data *sdata = sta->sdata;
327
328 sinfo->filled = STATION_INFO_INACTIVE_TIME |
329 STATION_INFO_RX_BYTES |
330 STATION_INFO_TX_BYTES;
331
332 sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx);
333 sinfo->rx_bytes = sta->rx_bytes;
334 sinfo->tx_bytes = sta->tx_bytes;
335
336 if (ieee80211_vif_is_mesh(&sdata->vif)) {
337#ifdef CONFIG_MAC80211_MESH
338 sinfo->filled |= STATION_INFO_LLID |
339 STATION_INFO_PLID |
340 STATION_INFO_PLINK_STATE;
341
342 sinfo->llid = le16_to_cpu(sta->llid);
343 sinfo->plid = le16_to_cpu(sta->plid);
344 sinfo->plink_state = sta->plink_state;
345#endif
346 }
347}
348
349
350static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
351 int idx, u8 *mac, struct station_info *sinfo)
352{
353 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
354 struct sta_info *sta;
355 int ret = -ENOENT;
356
357 rcu_read_lock();
358
359 sta = sta_info_get_by_idx(local, idx, dev);
360 if (sta) {
361 ret = 0;
362 memcpy(mac, sta->addr, ETH_ALEN);
363 sta_set_sinfo(sta, sinfo);
364 }
365
366 rcu_read_unlock();
367
368 return ret;
369}
370
274static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, 371static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
275 u8 *mac, struct station_stats *stats) 372 u8 *mac, struct station_info *sinfo)
276{ 373{
277 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 374 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
278 struct sta_info *sta; 375 struct sta_info *sta;
376 int ret = -ENOENT;
279 377
280 sta = sta_info_get(local, mac); 378 rcu_read_lock();
281 if (!sta)
282 return -ENOENT;
283 379
284 /* XXX: verify sta->dev == dev */ 380 /* XXX: verify sta->dev == dev */
285 381
286 stats->filled = STATION_STAT_INACTIVE_TIME | 382 sta = sta_info_get(local, mac);
287 STATION_STAT_RX_BYTES | 383 if (sta) {
288 STATION_STAT_TX_BYTES; 384 ret = 0;
289 385 sta_set_sinfo(sta, sinfo);
290 stats->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx); 386 }
291 stats->rx_bytes = sta->rx_bytes;
292 stats->tx_bytes = sta->tx_bytes;
293 387
294 sta_info_put(sta); 388 rcu_read_unlock();
295 389
296 return 0; 390 return ret;
297} 391}
298 392
299/* 393/*
@@ -486,8 +580,8 @@ static void ieee80211_send_layer2_update(struct sta_info *sta)
486 msg->xid_info[1] = 1; /* LLC types/classes: Type 1 LLC */ 580 msg->xid_info[1] = 1; /* LLC types/classes: Type 1 LLC */
487 msg->xid_info[2] = 0; /* XID sender's receive window size (RW) */ 581 msg->xid_info[2] = 0; /* XID sender's receive window size (RW) */
488 582
489 skb->dev = sta->dev; 583 skb->dev = sta->sdata->dev;
490 skb->protocol = eth_type_trans(skb, sta->dev); 584 skb->protocol = eth_type_trans(skb, sta->sdata->dev);
491 memset(skb->cb, 0, sizeof(skb->cb)); 585 memset(skb->cb, 0, sizeof(skb->cb));
492 netif_rx(skb); 586 netif_rx(skb);
493} 587}
@@ -498,7 +592,14 @@ static void sta_apply_parameters(struct ieee80211_local *local,
498{ 592{
499 u32 rates; 593 u32 rates;
500 int i, j; 594 int i, j;
501 struct ieee80211_hw_mode *mode; 595 struct ieee80211_supported_band *sband;
596 struct ieee80211_sub_if_data *sdata = sta->sdata;
597
598 /*
599 * FIXME: updating the flags is racy when this function is
600 * called from ieee80211_change_station(), this will
601 * be resolved in a future patch.
602 */
502 603
503 if (params->station_flags & STATION_FLAG_CHANGED) { 604 if (params->station_flags & STATION_FLAG_CHANGED) {
504 sta->flags &= ~WLAN_STA_AUTHORIZED; 605 sta->flags &= ~WLAN_STA_AUTHORIZED;
@@ -514,6 +615,13 @@ static void sta_apply_parameters(struct ieee80211_local *local,
514 sta->flags |= WLAN_STA_WME; 615 sta->flags |= WLAN_STA_WME;
515 } 616 }
516 617
618 /*
619 * FIXME: updating the following information is racy when this
620 * function is called from ieee80211_change_station().
621 * However, all this information should be static so
622 * maybe we should just reject attemps to change it.
623 */
624
517 if (params->aid) { 625 if (params->aid) {
518 sta->aid = params->aid; 626 sta->aid = params->aid;
519 if (sta->aid > IEEE80211_MAX_AID) 627 if (sta->aid > IEEE80211_MAX_AID)
@@ -525,15 +633,27 @@ static void sta_apply_parameters(struct ieee80211_local *local,
525 633
526 if (params->supported_rates) { 634 if (params->supported_rates) {
527 rates = 0; 635 rates = 0;
528 mode = local->oper_hw_mode; 636 sband = local->hw.wiphy->bands[local->oper_channel->band];
637
529 for (i = 0; i < params->supported_rates_len; i++) { 638 for (i = 0; i < params->supported_rates_len; i++) {
530 int rate = (params->supported_rates[i] & 0x7f) * 5; 639 int rate = (params->supported_rates[i] & 0x7f) * 5;
531 for (j = 0; j < mode->num_rates; j++) { 640 for (j = 0; j < sband->n_bitrates; j++) {
532 if (mode->rates[j].rate == rate) 641 if (sband->bitrates[j].bitrate == rate)
533 rates |= BIT(j); 642 rates |= BIT(j);
534 } 643 }
535 } 644 }
536 sta->supp_rates = rates; 645 sta->supp_rates[local->oper_channel->band] = rates;
646 }
647
648 if (ieee80211_vif_is_mesh(&sdata->vif) && params->plink_action) {
649 switch (params->plink_action) {
650 case PLINK_ACTION_OPEN:
651 mesh_plink_open(sta);
652 break;
653 case PLINK_ACTION_BLOCK:
654 mesh_plink_block(sta);
655 break;
656 }
537 } 657 }
538} 658}
539 659
@@ -543,18 +663,12 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
543 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 663 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
544 struct sta_info *sta; 664 struct sta_info *sta;
545 struct ieee80211_sub_if_data *sdata; 665 struct ieee80211_sub_if_data *sdata;
666 int err;
546 667
547 /* Prevent a race with changing the rate control algorithm */ 668 /* Prevent a race with changing the rate control algorithm */
548 if (!netif_running(dev)) 669 if (!netif_running(dev))
549 return -ENETDOWN; 670 return -ENETDOWN;
550 671
551 /* XXX: get sta belonging to dev */
552 sta = sta_info_get(local, mac);
553 if (sta) {
554 sta_info_put(sta);
555 return -EEXIST;
556 }
557
558 if (params->vlan) { 672 if (params->vlan) {
559 sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); 673 sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
560 674
@@ -564,22 +678,36 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
564 } else 678 } else
565 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 679 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
566 680
567 sta = sta_info_add(local, dev, mac, GFP_KERNEL); 681 if (compare_ether_addr(mac, dev->dev_addr) == 0)
682 return -EINVAL;
683
684 if (is_multicast_ether_addr(mac))
685 return -EINVAL;
686
687 sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
568 if (!sta) 688 if (!sta)
569 return -ENOMEM; 689 return -ENOMEM;
570 690
571 sta->dev = sdata->dev;
572 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN ||
573 sdata->vif.type == IEEE80211_IF_TYPE_AP)
574 ieee80211_send_layer2_update(sta);
575
576 sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC; 691 sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC;
577 692
578 sta_apply_parameters(local, sta, params); 693 sta_apply_parameters(local, sta, params);
579 694
580 rate_control_rate_init(sta, local); 695 rate_control_rate_init(sta, local);
581 696
582 sta_info_put(sta); 697 rcu_read_lock();
698
699 err = sta_info_insert(sta);
700 if (err) {
701 /* STA has been freed */
702 rcu_read_unlock();
703 return err;
704 }
705
706 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN ||
707 sdata->vif.type == IEEE80211_IF_TYPE_AP)
708 ieee80211_send_layer2_update(sta);
709
710 rcu_read_unlock();
583 711
584 return 0; 712 return 0;
585} 713}
@@ -587,19 +715,26 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
587static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev, 715static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
588 u8 *mac) 716 u8 *mac)
589{ 717{
590 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 718 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
719 struct ieee80211_local *local = sdata->local;
591 struct sta_info *sta; 720 struct sta_info *sta;
592 721
593 if (mac) { 722 if (mac) {
723 rcu_read_lock();
724
594 /* XXX: get sta belonging to dev */ 725 /* XXX: get sta belonging to dev */
595 sta = sta_info_get(local, mac); 726 sta = sta_info_get(local, mac);
596 if (!sta) 727 if (!sta) {
728 rcu_read_unlock();
597 return -ENOENT; 729 return -ENOENT;
730 }
731
732 sta_info_unlink(&sta);
733 rcu_read_unlock();
598 734
599 sta_info_free(sta); 735 sta_info_destroy(sta);
600 sta_info_put(sta);
601 } else 736 } else
602 sta_info_flush(local, dev); 737 sta_info_flush(local, sdata);
603 738
604 return 0; 739 return 0;
605} 740}
@@ -613,28 +748,203 @@ static int ieee80211_change_station(struct wiphy *wiphy,
613 struct sta_info *sta; 748 struct sta_info *sta;
614 struct ieee80211_sub_if_data *vlansdata; 749 struct ieee80211_sub_if_data *vlansdata;
615 750
751 rcu_read_lock();
752
616 /* XXX: get sta belonging to dev */ 753 /* XXX: get sta belonging to dev */
617 sta = sta_info_get(local, mac); 754 sta = sta_info_get(local, mac);
618 if (!sta) 755 if (!sta) {
756 rcu_read_unlock();
619 return -ENOENT; 757 return -ENOENT;
758 }
620 759
621 if (params->vlan && params->vlan != sta->dev) { 760 if (params->vlan && params->vlan != sta->sdata->dev) {
622 vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); 761 vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
623 762
624 if (vlansdata->vif.type != IEEE80211_IF_TYPE_VLAN || 763 if (vlansdata->vif.type != IEEE80211_IF_TYPE_VLAN ||
625 vlansdata->vif.type != IEEE80211_IF_TYPE_AP) 764 vlansdata->vif.type != IEEE80211_IF_TYPE_AP) {
765 rcu_read_unlock();
626 return -EINVAL; 766 return -EINVAL;
767 }
627 768
628 sta->dev = params->vlan; 769 sta->sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
629 ieee80211_send_layer2_update(sta); 770 ieee80211_send_layer2_update(sta);
630 } 771 }
631 772
632 sta_apply_parameters(local, sta, params); 773 sta_apply_parameters(local, sta, params);
633 774
634 sta_info_put(sta); 775 rcu_read_unlock();
776
777 return 0;
778}
779
780#ifdef CONFIG_MAC80211_MESH
781static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
782 u8 *dst, u8 *next_hop)
783{
784 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
785 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
786 struct mesh_path *mpath;
787 struct sta_info *sta;
788 int err;
789
790 if (!netif_running(dev))
791 return -ENETDOWN;
792
793 if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)
794 return -ENOTSUPP;
795
796 rcu_read_lock();
797 sta = sta_info_get(local, next_hop);
798 if (!sta) {
799 rcu_read_unlock();
800 return -ENOENT;
801 }
635 802
803 err = mesh_path_add(dst, dev);
804 if (err) {
805 rcu_read_unlock();
806 return err;
807 }
808
809 mpath = mesh_path_lookup(dst, dev);
810 if (!mpath) {
811 rcu_read_unlock();
812 return -ENXIO;
813 }
814 mesh_path_fix_nexthop(mpath, sta);
815
816 rcu_read_unlock();
817 return 0;
818}
819
820static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev,
821 u8 *dst)
822{
823 if (dst)
824 return mesh_path_del(dst, dev);
825
826 mesh_path_flush(dev);
827 return 0;
828}
829
830static int ieee80211_change_mpath(struct wiphy *wiphy,
831 struct net_device *dev,
832 u8 *dst, u8 *next_hop)
833{
834 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
835 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
836 struct mesh_path *mpath;
837 struct sta_info *sta;
838
839 if (!netif_running(dev))
840 return -ENETDOWN;
841
842 if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)
843 return -ENOTSUPP;
844
845 rcu_read_lock();
846
847 sta = sta_info_get(local, next_hop);
848 if (!sta) {
849 rcu_read_unlock();
850 return -ENOENT;
851 }
852
853 mpath = mesh_path_lookup(dst, dev);
854 if (!mpath) {
855 rcu_read_unlock();
856 return -ENOENT;
857 }
858
859 mesh_path_fix_nexthop(mpath, sta);
860
861 rcu_read_unlock();
862 return 0;
863}
864
865static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
866 struct mpath_info *pinfo)
867{
868 if (mpath->next_hop)
869 memcpy(next_hop, mpath->next_hop->addr, ETH_ALEN);
870 else
871 memset(next_hop, 0, ETH_ALEN);
872
873 pinfo->filled = MPATH_INFO_FRAME_QLEN |
874 MPATH_INFO_DSN |
875 MPATH_INFO_METRIC |
876 MPATH_INFO_EXPTIME |
877 MPATH_INFO_DISCOVERY_TIMEOUT |
878 MPATH_INFO_DISCOVERY_RETRIES |
879 MPATH_INFO_FLAGS;
880
881 pinfo->frame_qlen = mpath->frame_queue.qlen;
882 pinfo->dsn = mpath->dsn;
883 pinfo->metric = mpath->metric;
884 if (time_before(jiffies, mpath->exp_time))
885 pinfo->exptime = jiffies_to_msecs(mpath->exp_time - jiffies);
886 pinfo->discovery_timeout =
887 jiffies_to_msecs(mpath->discovery_timeout);
888 pinfo->discovery_retries = mpath->discovery_retries;
889 pinfo->flags = 0;
890 if (mpath->flags & MESH_PATH_ACTIVE)
891 pinfo->flags |= NL80211_MPATH_FLAG_ACTIVE;
892 if (mpath->flags & MESH_PATH_RESOLVING)
893 pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING;
894 if (mpath->flags & MESH_PATH_DSN_VALID)
895 pinfo->flags |= NL80211_MPATH_FLAG_DSN_VALID;
896 if (mpath->flags & MESH_PATH_FIXED)
897 pinfo->flags |= NL80211_MPATH_FLAG_FIXED;
898 if (mpath->flags & MESH_PATH_RESOLVING)
899 pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING;
900
901 pinfo->flags = mpath->flags;
902}
903
904static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev,
905 u8 *dst, u8 *next_hop, struct mpath_info *pinfo)
906
907{
908 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
909 struct mesh_path *mpath;
910
911 if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)
912 return -ENOTSUPP;
913
914 rcu_read_lock();
915 mpath = mesh_path_lookup(dst, dev);
916 if (!mpath) {
917 rcu_read_unlock();
918 return -ENOENT;
919 }
920 memcpy(dst, mpath->dst, ETH_ALEN);
921 mpath_set_pinfo(mpath, next_hop, pinfo);
922 rcu_read_unlock();
923 return 0;
924}
925
926static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev,
927 int idx, u8 *dst, u8 *next_hop,
928 struct mpath_info *pinfo)
929{
930 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
931 struct mesh_path *mpath;
932
933 if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)
934 return -ENOTSUPP;
935
936 rcu_read_lock();
937 mpath = mesh_path_lookup_by_idx(idx, dev);
938 if (!mpath) {
939 rcu_read_unlock();
940 return -ENOENT;
941 }
942 memcpy(dst, mpath->dst, ETH_ALEN);
943 mpath_set_pinfo(mpath, next_hop, pinfo);
944 rcu_read_unlock();
636 return 0; 945 return 0;
637} 946}
947#endif
638 948
639struct cfg80211_ops mac80211_config_ops = { 949struct cfg80211_ops mac80211_config_ops = {
640 .add_virtual_intf = ieee80211_add_iface, 950 .add_virtual_intf = ieee80211_add_iface,
@@ -651,4 +961,12 @@ struct cfg80211_ops mac80211_config_ops = {
651 .del_station = ieee80211_del_station, 961 .del_station = ieee80211_del_station,
652 .change_station = ieee80211_change_station, 962 .change_station = ieee80211_change_station,
653 .get_station = ieee80211_get_station, 963 .get_station = ieee80211_get_station,
964 .dump_station = ieee80211_dump_station,
965#ifdef CONFIG_MAC80211_MESH
966 .add_mpath = ieee80211_add_mpath,
967 .del_mpath = ieee80211_del_mpath,
968 .change_mpath = ieee80211_change_mpath,
969 .get_mpath = ieee80211_get_mpath,
970 .dump_mpath = ieee80211_dump_mpath,
971#endif
654}; 972};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 60514b2c97b9..1cccbfd781f6 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -10,7 +10,7 @@
10#include <linux/debugfs.h> 10#include <linux/debugfs.h>
11#include <linux/rtnetlink.h> 11#include <linux/rtnetlink.h>
12#include "ieee80211_i.h" 12#include "ieee80211_i.h"
13#include "ieee80211_rate.h" 13#include "rate.h"
14#include "debugfs.h" 14#include "debugfs.h"
15 15
16int mac80211_open_file_generic(struct inode *inode, struct file *file) 16int mac80211_open_file_generic(struct inode *inode, struct file *file)
@@ -19,41 +19,6 @@ int mac80211_open_file_generic(struct inode *inode, struct file *file)
19 return 0; 19 return 0;
20} 20}
21 21
22static const char *ieee80211_mode_str(int mode)
23{
24 switch (mode) {
25 case MODE_IEEE80211A:
26 return "IEEE 802.11a";
27 case MODE_IEEE80211B:
28 return "IEEE 802.11b";
29 case MODE_IEEE80211G:
30 return "IEEE 802.11g";
31 default:
32 return "UNKNOWN";
33 }
34}
35
36static ssize_t modes_read(struct file *file, char __user *userbuf,
37 size_t count, loff_t *ppos)
38{
39 struct ieee80211_local *local = file->private_data;
40 struct ieee80211_hw_mode *mode;
41 char buf[150], *p = buf;
42
43 /* FIXME: locking! */
44 list_for_each_entry(mode, &local->modes_list, list) {
45 p += scnprintf(p, sizeof(buf)+buf-p,
46 "%s\n", ieee80211_mode_str(mode->mode));
47 }
48
49 return simple_read_from_buffer(userbuf, count, ppos, buf, p-buf);
50}
51
52static const struct file_operations modes_ops = {
53 .read = modes_read,
54 .open = mac80211_open_file_generic,
55};
56
57#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \ 22#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \
58static ssize_t name## _read(struct file *file, char __user *userbuf, \ 23static ssize_t name## _read(struct file *file, char __user *userbuf, \
59 size_t count, loff_t *ppos) \ 24 size_t count, loff_t *ppos) \
@@ -72,7 +37,7 @@ static const struct file_operations name## _ops = { \
72}; 37};
73 38
74#define DEBUGFS_ADD(name) \ 39#define DEBUGFS_ADD(name) \
75 local->debugfs.name = debugfs_create_file(#name, 0444, phyd, \ 40 local->debugfs.name = debugfs_create_file(#name, 0400, phyd, \
76 local, &name## _ops); 41 local, &name## _ops);
77 42
78#define DEBUGFS_DEL(name) \ 43#define DEBUGFS_DEL(name) \
@@ -80,10 +45,8 @@ static const struct file_operations name## _ops = { \
80 local->debugfs.name = NULL; 45 local->debugfs.name = NULL;
81 46
82 47
83DEBUGFS_READONLY_FILE(channel, 20, "%d",
84 local->hw.conf.channel);
85DEBUGFS_READONLY_FILE(frequency, 20, "%d", 48DEBUGFS_READONLY_FILE(frequency, 20, "%d",
86 local->hw.conf.freq); 49 local->hw.conf.channel->center_freq);
87DEBUGFS_READONLY_FILE(antenna_sel_tx, 20, "%d", 50DEBUGFS_READONLY_FILE(antenna_sel_tx, 20, "%d",
88 local->hw.conf.antenna_sel_tx); 51 local->hw.conf.antenna_sel_tx);
89DEBUGFS_READONLY_FILE(antenna_sel_rx, 20, "%d", 52DEBUGFS_READONLY_FILE(antenna_sel_rx, 20, "%d",
@@ -100,8 +63,6 @@ DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d",
100 local->long_retry_limit); 63 local->long_retry_limit);
101DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d", 64DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d",
102 local->total_ps_buffered); 65 local->total_ps_buffered);
103DEBUGFS_READONLY_FILE(mode, 20, "%s",
104 ieee80211_mode_str(local->hw.conf.phymode));
105DEBUGFS_READONLY_FILE(wep_iv, 20, "%#06x", 66DEBUGFS_READONLY_FILE(wep_iv, 20, "%#06x",
106 local->wep_iv & 0xffffff); 67 local->wep_iv & 0xffffff);
107DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s", 68DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s",
@@ -169,7 +130,7 @@ static const struct file_operations stats_ ##name## _ops = { \
169}; 130};
170 131
171#define DEBUGFS_STATS_ADD(name) \ 132#define DEBUGFS_STATS_ADD(name) \
172 local->debugfs.stats.name = debugfs_create_file(#name, 0444, statsd,\ 133 local->debugfs.stats.name = debugfs_create_file(#name, 0400, statsd,\
173 local, &stats_ ##name## _ops); 134 local, &stats_ ##name## _ops);
174 135
175#define DEBUGFS_STATS_DEL(name) \ 136#define DEBUGFS_STATS_DEL(name) \
@@ -294,7 +255,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
294 local->debugfs.stations = debugfs_create_dir("stations", phyd); 255 local->debugfs.stations = debugfs_create_dir("stations", phyd);
295 local->debugfs.keys = debugfs_create_dir("keys", phyd); 256 local->debugfs.keys = debugfs_create_dir("keys", phyd);
296 257
297 DEBUGFS_ADD(channel);
298 DEBUGFS_ADD(frequency); 258 DEBUGFS_ADD(frequency);
299 DEBUGFS_ADD(antenna_sel_tx); 259 DEBUGFS_ADD(antenna_sel_tx);
300 DEBUGFS_ADD(antenna_sel_rx); 260 DEBUGFS_ADD(antenna_sel_rx);
@@ -304,9 +264,7 @@ void debugfs_hw_add(struct ieee80211_local *local)
304 DEBUGFS_ADD(short_retry_limit); 264 DEBUGFS_ADD(short_retry_limit);
305 DEBUGFS_ADD(long_retry_limit); 265 DEBUGFS_ADD(long_retry_limit);
306 DEBUGFS_ADD(total_ps_buffered); 266 DEBUGFS_ADD(total_ps_buffered);
307 DEBUGFS_ADD(mode);
308 DEBUGFS_ADD(wep_iv); 267 DEBUGFS_ADD(wep_iv);
309 DEBUGFS_ADD(modes);
310 268
311 statsd = debugfs_create_dir("statistics", phyd); 269 statsd = debugfs_create_dir("statistics", phyd);
312 local->debugfs.statistics = statsd; 270 local->debugfs.statistics = statsd;
@@ -356,7 +314,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
356 314
357void debugfs_hw_del(struct ieee80211_local *local) 315void debugfs_hw_del(struct ieee80211_local *local)
358{ 316{
359 DEBUGFS_DEL(channel);
360 DEBUGFS_DEL(frequency); 317 DEBUGFS_DEL(frequency);
361 DEBUGFS_DEL(antenna_sel_tx); 318 DEBUGFS_DEL(antenna_sel_tx);
362 DEBUGFS_DEL(antenna_sel_rx); 319 DEBUGFS_DEL(antenna_sel_rx);
@@ -366,9 +323,7 @@ void debugfs_hw_del(struct ieee80211_local *local)
366 DEBUGFS_DEL(short_retry_limit); 323 DEBUGFS_DEL(short_retry_limit);
367 DEBUGFS_DEL(long_retry_limit); 324 DEBUGFS_DEL(long_retry_limit);
368 DEBUGFS_DEL(total_ps_buffered); 325 DEBUGFS_DEL(total_ps_buffered);
369 DEBUGFS_DEL(mode);
370 DEBUGFS_DEL(wep_iv); 326 DEBUGFS_DEL(wep_iv);
371 DEBUGFS_DEL(modes);
372 327
373 DEBUGFS_STATS_DEL(transmitted_fragment_count); 328 DEBUGFS_STATS_DEL(transmitted_fragment_count);
374 DEBUGFS_STATS_DEL(multicast_transmitted_frame_count); 329 DEBUGFS_STATS_DEL(multicast_transmitted_frame_count);
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index c881524c8725..879e7210458a 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -10,7 +10,7 @@
10 10
11#include <linux/kobject.h> 11#include <linux/kobject.h>
12#include "ieee80211_i.h" 12#include "ieee80211_i.h"
13#include "ieee80211_key.h" 13#include "key.h"
14#include "debugfs.h" 14#include "debugfs.h"
15#include "debugfs_key.h" 15#include "debugfs_key.h"
16 16
@@ -184,23 +184,36 @@ KEY_OPS(key);
184 key->debugfs.name = debugfs_create_file(#name, 0400,\ 184 key->debugfs.name = debugfs_create_file(#name, 0400,\
185 key->debugfs.dir, key, &key_##name##_ops); 185 key->debugfs.dir, key, &key_##name##_ops);
186 186
187void ieee80211_debugfs_key_add(struct ieee80211_local *local, 187void ieee80211_debugfs_key_add(struct ieee80211_key *key)
188 struct ieee80211_key *key) 188 {
189{
190 static int keycount; 189 static int keycount;
191 char buf[20]; 190 char buf[50];
191 DECLARE_MAC_BUF(mac);
192 struct sta_info *sta;
192 193
193 if (!local->debugfs.keys) 194 if (!key->local->debugfs.keys)
194 return; 195 return;
195 196
196 sprintf(buf, "%d", keycount); 197 sprintf(buf, "%d", keycount);
198 key->debugfs.cnt = keycount;
197 keycount++; 199 keycount++;
198 key->debugfs.dir = debugfs_create_dir(buf, 200 key->debugfs.dir = debugfs_create_dir(buf,
199 local->debugfs.keys); 201 key->local->debugfs.keys);
200 202
201 if (!key->debugfs.dir) 203 if (!key->debugfs.dir)
202 return; 204 return;
203 205
206 rcu_read_lock();
207 sta = rcu_dereference(key->sta);
208 if (sta)
209 sprintf(buf, "../../stations/%s", print_mac(mac, sta->addr));
210 rcu_read_unlock();
211
212 /* using sta as a boolean is fine outside RCU lock */
213 if (sta)
214 key->debugfs.stalink =
215 debugfs_create_symlink("station", key->debugfs.dir, buf);
216
204 DEBUGFS_ADD(keylen); 217 DEBUGFS_ADD(keylen);
205 DEBUGFS_ADD(flags); 218 DEBUGFS_ADD(flags);
206 DEBUGFS_ADD(keyidx); 219 DEBUGFS_ADD(keyidx);
@@ -246,7 +259,7 @@ void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata)
246 if (!sdata->debugfsdir) 259 if (!sdata->debugfsdir)
247 return; 260 return;
248 261
249 sprintf(buf, "../keys/%d", sdata->default_key->conf.keyidx); 262 sprintf(buf, "../keys/%d", sdata->default_key->debugfs.cnt);
250 sdata->debugfs.default_key = 263 sdata->debugfs.default_key =
251 debugfs_create_symlink("default_key", sdata->debugfsdir, buf); 264 debugfs_create_symlink("default_key", sdata->debugfsdir, buf);
252} 265}
@@ -258,19 +271,6 @@ void ieee80211_debugfs_key_remove_default(struct ieee80211_sub_if_data *sdata)
258 debugfs_remove(sdata->debugfs.default_key); 271 debugfs_remove(sdata->debugfs.default_key);
259 sdata->debugfs.default_key = NULL; 272 sdata->debugfs.default_key = NULL;
260} 273}
261void ieee80211_debugfs_key_sta_link(struct ieee80211_key *key,
262 struct sta_info *sta)
263{
264 char buf[50];
265 DECLARE_MAC_BUF(mac);
266
267 if (!key->debugfs.dir)
268 return;
269
270 sprintf(buf, "../../stations/%s", print_mac(mac, sta->addr));
271 key->debugfs.stalink =
272 debugfs_create_symlink("station", key->debugfs.dir, buf);
273}
274 274
275void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, 275void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key,
276 struct sta_info *sta) 276 struct sta_info *sta)
diff --git a/net/mac80211/debugfs_key.h b/net/mac80211/debugfs_key.h
index aecfce395da6..b1a3754ee240 100644
--- a/net/mac80211/debugfs_key.h
+++ b/net/mac80211/debugfs_key.h
@@ -2,18 +2,14 @@
2#define __MAC80211_DEBUGFS_KEY_H 2#define __MAC80211_DEBUGFS_KEY_H
3 3
4#ifdef CONFIG_MAC80211_DEBUGFS 4#ifdef CONFIG_MAC80211_DEBUGFS
5void ieee80211_debugfs_key_add(struct ieee80211_local *local, 5void ieee80211_debugfs_key_add(struct ieee80211_key *key);
6 struct ieee80211_key *key);
7void ieee80211_debugfs_key_remove(struct ieee80211_key *key); 6void ieee80211_debugfs_key_remove(struct ieee80211_key *key);
8void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata); 7void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata);
9void ieee80211_debugfs_key_remove_default(struct ieee80211_sub_if_data *sdata); 8void ieee80211_debugfs_key_remove_default(struct ieee80211_sub_if_data *sdata);
10void ieee80211_debugfs_key_sta_link(struct ieee80211_key *key,
11 struct sta_info *sta);
12void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, 9void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key,
13 struct sta_info *sta); 10 struct sta_info *sta);
14#else 11#else
15static inline void ieee80211_debugfs_key_add(struct ieee80211_local *local, 12static inline void ieee80211_debugfs_key_add(struct ieee80211_key *key)
16 struct ieee80211_key *key)
17{} 13{}
18static inline void ieee80211_debugfs_key_remove(struct ieee80211_key *key) 14static inline void ieee80211_debugfs_key_remove(struct ieee80211_key *key)
19{} 15{}
@@ -23,9 +19,6 @@ static inline void ieee80211_debugfs_key_add_default(
23static inline void ieee80211_debugfs_key_remove_default( 19static inline void ieee80211_debugfs_key_remove_default(
24 struct ieee80211_sub_if_data *sdata) 20 struct ieee80211_sub_if_data *sdata)
25{} 21{}
26static inline void ieee80211_debugfs_key_sta_link(
27 struct ieee80211_key *key, struct sta_info *sta)
28{}
29static inline void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, 22static inline void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key,
30 struct sta_info *sta) 23 struct sta_info *sta)
31{} 24{}
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 829872a3ae81..e3326d046944 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -17,7 +17,7 @@
17#include <net/mac80211.h> 17#include <net/mac80211.h>
18#include <net/cfg80211.h> 18#include <net/cfg80211.h>
19#include "ieee80211_i.h" 19#include "ieee80211_i.h"
20#include "ieee80211_rate.h" 20#include "rate.h"
21#include "debugfs.h" 21#include "debugfs.h"
22#include "debugfs_netdev.h" 22#include "debugfs_netdev.h"
23 23
@@ -31,14 +31,39 @@ static ssize_t ieee80211_if_read(
31 ssize_t ret = -EINVAL; 31 ssize_t ret = -EINVAL;
32 32
33 read_lock(&dev_base_lock); 33 read_lock(&dev_base_lock);
34 if (sdata->dev->reg_state == NETREG_REGISTERED) { 34 if (sdata->dev->reg_state == NETREG_REGISTERED)
35 ret = (*format)(sdata, buf, sizeof(buf)); 35 ret = (*format)(sdata, buf, sizeof(buf));
36 ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret);
37 }
38 read_unlock(&dev_base_lock); 36 read_unlock(&dev_base_lock);
37
38 if (ret != -EINVAL)
39 ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret);
40
39 return ret; 41 return ret;
40} 42}
41 43
44#ifdef CONFIG_MAC80211_MESH
45static ssize_t ieee80211_if_write(
46 struct ieee80211_sub_if_data *sdata,
47 char const __user *userbuf,
48 size_t count, loff_t *ppos,
49 int (*format)(struct ieee80211_sub_if_data *, char *))
50{
51 char buf[10];
52 int buf_size;
53
54 memset(buf, 0x00, sizeof(buf));
55 buf_size = min(count, (sizeof(buf)-1));
56 if (copy_from_user(buf, userbuf, buf_size))
57 return count;
58 read_lock(&dev_base_lock);
59 if (sdata->dev->reg_state == NETREG_REGISTERED)
60 (*format)(sdata, buf);
61 read_unlock(&dev_base_lock);
62
63 return count;
64}
65#endif
66
42#define IEEE80211_IF_FMT(name, field, format_string) \ 67#define IEEE80211_IF_FMT(name, field, format_string) \
43static ssize_t ieee80211_if_fmt_##name( \ 68static ssize_t ieee80211_if_fmt_##name( \
44 const struct ieee80211_sub_if_data *sdata, char *buf, \ 69 const struct ieee80211_sub_if_data *sdata, char *buf, \
@@ -46,6 +71,19 @@ static ssize_t ieee80211_if_fmt_##name( \
46{ \ 71{ \
47 return scnprintf(buf, buflen, format_string, sdata->field); \ 72 return scnprintf(buf, buflen, format_string, sdata->field); \
48} 73}
74#define IEEE80211_IF_WFMT(name, field, type) \
75static int ieee80211_if_wfmt_##name( \
76 struct ieee80211_sub_if_data *sdata, char *buf) \
77{ \
78 unsigned long tmp; \
79 char *endp; \
80 \
81 tmp = simple_strtoul(buf, &endp, 0); \
82 if ((endp == buf) || ((type)tmp != tmp)) \
83 return -EINVAL; \
84 sdata->field = tmp; \
85 return 0; \
86}
49#define IEEE80211_IF_FMT_DEC(name, field) \ 87#define IEEE80211_IF_FMT_DEC(name, field) \
50 IEEE80211_IF_FMT(name, field, "%d\n") 88 IEEE80211_IF_FMT(name, field, "%d\n")
51#define IEEE80211_IF_FMT_HEX(name, field) \ 89#define IEEE80211_IF_FMT_HEX(name, field) \
@@ -88,10 +126,37 @@ static const struct file_operations name##_ops = { \
88 IEEE80211_IF_FMT_##format(name, field) \ 126 IEEE80211_IF_FMT_##format(name, field) \
89 __IEEE80211_IF_FILE(name) 127 __IEEE80211_IF_FILE(name)
90 128
129#define __IEEE80211_IF_WFILE(name) \
130static ssize_t ieee80211_if_read_##name(struct file *file, \
131 char __user *userbuf, \
132 size_t count, loff_t *ppos) \
133{ \
134 return ieee80211_if_read(file->private_data, \
135 userbuf, count, ppos, \
136 ieee80211_if_fmt_##name); \
137} \
138static ssize_t ieee80211_if_write_##name(struct file *file, \
139 const char __user *userbuf, \
140 size_t count, loff_t *ppos) \
141{ \
142 return ieee80211_if_write(file->private_data, \
143 userbuf, count, ppos, \
144 ieee80211_if_wfmt_##name); \
145} \
146static const struct file_operations name##_ops = { \
147 .read = ieee80211_if_read_##name, \
148 .write = ieee80211_if_write_##name, \
149 .open = mac80211_open_file_generic, \
150}
151
152#define IEEE80211_IF_WFILE(name, field, format, type) \
153 IEEE80211_IF_FMT_##format(name, field) \
154 IEEE80211_IF_WFMT(name, field, type) \
155 __IEEE80211_IF_WFILE(name)
156
91/* common attributes */ 157/* common attributes */
92IEEE80211_IF_FILE(channel_use, channel_use, DEC); 158IEEE80211_IF_FILE(channel_use, channel_use, DEC);
93IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC); 159IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC);
94IEEE80211_IF_FILE(ieee802_1x_pac, ieee802_1x_pac, DEC);
95 160
96/* STA/IBSS attributes */ 161/* STA/IBSS attributes */
97IEEE80211_IF_FILE(state, u.sta.state, DEC); 162IEEE80211_IF_FILE(state, u.sta.state, DEC);
@@ -107,6 +172,7 @@ IEEE80211_IF_FILE(assoc_tries, u.sta.assoc_tries, DEC);
107IEEE80211_IF_FILE(auth_algs, u.sta.auth_algs, HEX); 172IEEE80211_IF_FILE(auth_algs, u.sta.auth_algs, HEX);
108IEEE80211_IF_FILE(auth_alg, u.sta.auth_alg, DEC); 173IEEE80211_IF_FILE(auth_alg, u.sta.auth_alg, DEC);
109IEEE80211_IF_FILE(auth_transaction, u.sta.auth_transaction, DEC); 174IEEE80211_IF_FILE(auth_transaction, u.sta.auth_transaction, DEC);
175IEEE80211_IF_FILE(num_beacons_sta, u.sta.num_beacons, DEC);
110 176
111static ssize_t ieee80211_if_fmt_flags( 177static ssize_t ieee80211_if_fmt_flags(
112 const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) 178 const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
@@ -140,15 +206,50 @@ __IEEE80211_IF_FILE(num_buffered_multicast);
140/* WDS attributes */ 206/* WDS attributes */
141IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC); 207IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
142 208
209#ifdef CONFIG_MAC80211_MESH
210/* Mesh stats attributes */
211IEEE80211_IF_FILE(fwded_frames, u.sta.mshstats.fwded_frames, DEC);
212IEEE80211_IF_FILE(dropped_frames_ttl, u.sta.mshstats.dropped_frames_ttl, DEC);
213IEEE80211_IF_FILE(dropped_frames_no_route,
214 u.sta.mshstats.dropped_frames_no_route, DEC);
215IEEE80211_IF_FILE(estab_plinks, u.sta.mshstats.estab_plinks, ATOMIC);
216
217/* Mesh parameters */
218IEEE80211_IF_WFILE(dot11MeshMaxRetries,
219 u.sta.mshcfg.dot11MeshMaxRetries, DEC, u8);
220IEEE80211_IF_WFILE(dot11MeshRetryTimeout,
221 u.sta.mshcfg.dot11MeshRetryTimeout, DEC, u16);
222IEEE80211_IF_WFILE(dot11MeshConfirmTimeout,
223 u.sta.mshcfg.dot11MeshConfirmTimeout, DEC, u16);
224IEEE80211_IF_WFILE(dot11MeshHoldingTimeout,
225 u.sta.mshcfg.dot11MeshHoldingTimeout, DEC, u16);
226IEEE80211_IF_WFILE(dot11MeshTTL, u.sta.mshcfg.dot11MeshTTL, DEC, u8);
227IEEE80211_IF_WFILE(auto_open_plinks, u.sta.mshcfg.auto_open_plinks, DEC, u8);
228IEEE80211_IF_WFILE(dot11MeshMaxPeerLinks,
229 u.sta.mshcfg.dot11MeshMaxPeerLinks, DEC, u16);
230IEEE80211_IF_WFILE(dot11MeshHWMPactivePathTimeout,
231 u.sta.mshcfg.dot11MeshHWMPactivePathTimeout, DEC, u32);
232IEEE80211_IF_WFILE(dot11MeshHWMPpreqMinInterval,
233 u.sta.mshcfg.dot11MeshHWMPpreqMinInterval, DEC, u16);
234IEEE80211_IF_WFILE(dot11MeshHWMPnetDiameterTraversalTime,
235 u.sta.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC, u16);
236IEEE80211_IF_WFILE(dot11MeshHWMPmaxPREQretries,
237 u.sta.mshcfg.dot11MeshHWMPmaxPREQretries, DEC, u8);
238IEEE80211_IF_WFILE(path_refresh_time,
239 u.sta.mshcfg.path_refresh_time, DEC, u32);
240IEEE80211_IF_WFILE(min_discovery_timeout,
241 u.sta.mshcfg.min_discovery_timeout, DEC, u16);
242#endif
243
244
143#define DEBUGFS_ADD(name, type)\ 245#define DEBUGFS_ADD(name, type)\
144 sdata->debugfs.type.name = debugfs_create_file(#name, 0444,\ 246 sdata->debugfs.type.name = debugfs_create_file(#name, 0400,\
145 sdata->debugfsdir, sdata, &name##_ops); 247 sdata->debugfsdir, sdata, &name##_ops);
146 248
147static void add_sta_files(struct ieee80211_sub_if_data *sdata) 249static void add_sta_files(struct ieee80211_sub_if_data *sdata)
148{ 250{
149 DEBUGFS_ADD(channel_use, sta); 251 DEBUGFS_ADD(channel_use, sta);
150 DEBUGFS_ADD(drop_unencrypted, sta); 252 DEBUGFS_ADD(drop_unencrypted, sta);
151 DEBUGFS_ADD(ieee802_1x_pac, sta);
152 DEBUGFS_ADD(state, sta); 253 DEBUGFS_ADD(state, sta);
153 DEBUGFS_ADD(bssid, sta); 254 DEBUGFS_ADD(bssid, sta);
154 DEBUGFS_ADD(prev_bssid, sta); 255 DEBUGFS_ADD(prev_bssid, sta);
@@ -163,13 +264,13 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
163 DEBUGFS_ADD(auth_alg, sta); 264 DEBUGFS_ADD(auth_alg, sta);
164 DEBUGFS_ADD(auth_transaction, sta); 265 DEBUGFS_ADD(auth_transaction, sta);
165 DEBUGFS_ADD(flags, sta); 266 DEBUGFS_ADD(flags, sta);
267 DEBUGFS_ADD(num_beacons_sta, sta);
166} 268}
167 269
168static void add_ap_files(struct ieee80211_sub_if_data *sdata) 270static void add_ap_files(struct ieee80211_sub_if_data *sdata)
169{ 271{
170 DEBUGFS_ADD(channel_use, ap); 272 DEBUGFS_ADD(channel_use, ap);
171 DEBUGFS_ADD(drop_unencrypted, ap); 273 DEBUGFS_ADD(drop_unencrypted, ap);
172 DEBUGFS_ADD(ieee802_1x_pac, ap);
173 DEBUGFS_ADD(num_sta_ps, ap); 274 DEBUGFS_ADD(num_sta_ps, ap);
174 DEBUGFS_ADD(dtim_count, ap); 275 DEBUGFS_ADD(dtim_count, ap);
175 DEBUGFS_ADD(num_beacons, ap); 276 DEBUGFS_ADD(num_beacons, ap);
@@ -182,7 +283,6 @@ static void add_wds_files(struct ieee80211_sub_if_data *sdata)
182{ 283{
183 DEBUGFS_ADD(channel_use, wds); 284 DEBUGFS_ADD(channel_use, wds);
184 DEBUGFS_ADD(drop_unencrypted, wds); 285 DEBUGFS_ADD(drop_unencrypted, wds);
185 DEBUGFS_ADD(ieee802_1x_pac, wds);
186 DEBUGFS_ADD(peer, wds); 286 DEBUGFS_ADD(peer, wds);
187} 287}
188 288
@@ -190,19 +290,63 @@ static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
190{ 290{
191 DEBUGFS_ADD(channel_use, vlan); 291 DEBUGFS_ADD(channel_use, vlan);
192 DEBUGFS_ADD(drop_unencrypted, vlan); 292 DEBUGFS_ADD(drop_unencrypted, vlan);
193 DEBUGFS_ADD(ieee802_1x_pac, vlan);
194} 293}
195 294
196static void add_monitor_files(struct ieee80211_sub_if_data *sdata) 295static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
197{ 296{
198} 297}
199 298
299#ifdef CONFIG_MAC80211_MESH
300#define MESHSTATS_ADD(name)\
301 sdata->mesh_stats.name = debugfs_create_file(#name, 0400,\
302 sdata->mesh_stats_dir, sdata, &name##_ops);
303
304static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
305{
306 sdata->mesh_stats_dir = debugfs_create_dir("mesh_stats",
307 sdata->debugfsdir);
308 MESHSTATS_ADD(fwded_frames);
309 MESHSTATS_ADD(dropped_frames_ttl);
310 MESHSTATS_ADD(dropped_frames_no_route);
311 MESHSTATS_ADD(estab_plinks);
312}
313
314#define MESHPARAMS_ADD(name)\
315 sdata->mesh_config.name = debugfs_create_file(#name, 0600,\
316 sdata->mesh_config_dir, sdata, &name##_ops);
317
318static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
319{
320 sdata->mesh_config_dir = debugfs_create_dir("mesh_config",
321 sdata->debugfsdir);
322 MESHPARAMS_ADD(dot11MeshMaxRetries);
323 MESHPARAMS_ADD(dot11MeshRetryTimeout);
324 MESHPARAMS_ADD(dot11MeshConfirmTimeout);
325 MESHPARAMS_ADD(dot11MeshHoldingTimeout);
326 MESHPARAMS_ADD(dot11MeshTTL);
327 MESHPARAMS_ADD(auto_open_plinks);
328 MESHPARAMS_ADD(dot11MeshMaxPeerLinks);
329 MESHPARAMS_ADD(dot11MeshHWMPactivePathTimeout);
330 MESHPARAMS_ADD(dot11MeshHWMPpreqMinInterval);
331 MESHPARAMS_ADD(dot11MeshHWMPnetDiameterTraversalTime);
332 MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries);
333 MESHPARAMS_ADD(path_refresh_time);
334 MESHPARAMS_ADD(min_discovery_timeout);
335}
336#endif
337
200static void add_files(struct ieee80211_sub_if_data *sdata) 338static void add_files(struct ieee80211_sub_if_data *sdata)
201{ 339{
202 if (!sdata->debugfsdir) 340 if (!sdata->debugfsdir)
203 return; 341 return;
204 342
205 switch (sdata->vif.type) { 343 switch (sdata->vif.type) {
344 case IEEE80211_IF_TYPE_MESH_POINT:
345#ifdef CONFIG_MAC80211_MESH
346 add_mesh_stats(sdata);
347 add_mesh_config(sdata);
348#endif
349 /* fall through */
206 case IEEE80211_IF_TYPE_STA: 350 case IEEE80211_IF_TYPE_STA:
207 case IEEE80211_IF_TYPE_IBSS: 351 case IEEE80211_IF_TYPE_IBSS:
208 add_sta_files(sdata); 352 add_sta_files(sdata);
@@ -234,7 +378,6 @@ static void del_sta_files(struct ieee80211_sub_if_data *sdata)
234{ 378{
235 DEBUGFS_DEL(channel_use, sta); 379 DEBUGFS_DEL(channel_use, sta);
236 DEBUGFS_DEL(drop_unencrypted, sta); 380 DEBUGFS_DEL(drop_unencrypted, sta);
237 DEBUGFS_DEL(ieee802_1x_pac, sta);
238 DEBUGFS_DEL(state, sta); 381 DEBUGFS_DEL(state, sta);
239 DEBUGFS_DEL(bssid, sta); 382 DEBUGFS_DEL(bssid, sta);
240 DEBUGFS_DEL(prev_bssid, sta); 383 DEBUGFS_DEL(prev_bssid, sta);
@@ -249,13 +392,13 @@ static void del_sta_files(struct ieee80211_sub_if_data *sdata)
249 DEBUGFS_DEL(auth_alg, sta); 392 DEBUGFS_DEL(auth_alg, sta);
250 DEBUGFS_DEL(auth_transaction, sta); 393 DEBUGFS_DEL(auth_transaction, sta);
251 DEBUGFS_DEL(flags, sta); 394 DEBUGFS_DEL(flags, sta);
395 DEBUGFS_DEL(num_beacons_sta, sta);
252} 396}
253 397
254static void del_ap_files(struct ieee80211_sub_if_data *sdata) 398static void del_ap_files(struct ieee80211_sub_if_data *sdata)
255{ 399{
256 DEBUGFS_DEL(channel_use, ap); 400 DEBUGFS_DEL(channel_use, ap);
257 DEBUGFS_DEL(drop_unencrypted, ap); 401 DEBUGFS_DEL(drop_unencrypted, ap);
258 DEBUGFS_DEL(ieee802_1x_pac, ap);
259 DEBUGFS_DEL(num_sta_ps, ap); 402 DEBUGFS_DEL(num_sta_ps, ap);
260 DEBUGFS_DEL(dtim_count, ap); 403 DEBUGFS_DEL(dtim_count, ap);
261 DEBUGFS_DEL(num_beacons, ap); 404 DEBUGFS_DEL(num_beacons, ap);
@@ -268,7 +411,6 @@ static void del_wds_files(struct ieee80211_sub_if_data *sdata)
268{ 411{
269 DEBUGFS_DEL(channel_use, wds); 412 DEBUGFS_DEL(channel_use, wds);
270 DEBUGFS_DEL(drop_unencrypted, wds); 413 DEBUGFS_DEL(drop_unencrypted, wds);
271 DEBUGFS_DEL(ieee802_1x_pac, wds);
272 DEBUGFS_DEL(peer, wds); 414 DEBUGFS_DEL(peer, wds);
273} 415}
274 416
@@ -276,19 +418,67 @@ static void del_vlan_files(struct ieee80211_sub_if_data *sdata)
276{ 418{
277 DEBUGFS_DEL(channel_use, vlan); 419 DEBUGFS_DEL(channel_use, vlan);
278 DEBUGFS_DEL(drop_unencrypted, vlan); 420 DEBUGFS_DEL(drop_unencrypted, vlan);
279 DEBUGFS_DEL(ieee802_1x_pac, vlan);
280} 421}
281 422
282static void del_monitor_files(struct ieee80211_sub_if_data *sdata) 423static void del_monitor_files(struct ieee80211_sub_if_data *sdata)
283{ 424{
284} 425}
285 426
427#ifdef CONFIG_MAC80211_MESH
428#define MESHSTATS_DEL(name) \
429 do { \
430 debugfs_remove(sdata->mesh_stats.name); \
431 sdata->mesh_stats.name = NULL; \
432 } while (0)
433
434static void del_mesh_stats(struct ieee80211_sub_if_data *sdata)
435{
436 MESHSTATS_DEL(fwded_frames);
437 MESHSTATS_DEL(dropped_frames_ttl);
438 MESHSTATS_DEL(dropped_frames_no_route);
439 MESHSTATS_DEL(estab_plinks);
440 debugfs_remove(sdata->mesh_stats_dir);
441 sdata->mesh_stats_dir = NULL;
442}
443
444#define MESHPARAMS_DEL(name) \
445 do { \
446 debugfs_remove(sdata->mesh_config.name); \
447 sdata->mesh_config.name = NULL; \
448 } while (0)
449
450static void del_mesh_config(struct ieee80211_sub_if_data *sdata)
451{
452 MESHPARAMS_DEL(dot11MeshMaxRetries);
453 MESHPARAMS_DEL(dot11MeshRetryTimeout);
454 MESHPARAMS_DEL(dot11MeshConfirmTimeout);
455 MESHPARAMS_DEL(dot11MeshHoldingTimeout);
456 MESHPARAMS_DEL(dot11MeshTTL);
457 MESHPARAMS_DEL(auto_open_plinks);
458 MESHPARAMS_DEL(dot11MeshMaxPeerLinks);
459 MESHPARAMS_DEL(dot11MeshHWMPactivePathTimeout);
460 MESHPARAMS_DEL(dot11MeshHWMPpreqMinInterval);
461 MESHPARAMS_DEL(dot11MeshHWMPnetDiameterTraversalTime);
462 MESHPARAMS_DEL(dot11MeshHWMPmaxPREQretries);
463 MESHPARAMS_DEL(path_refresh_time);
464 MESHPARAMS_DEL(min_discovery_timeout);
465 debugfs_remove(sdata->mesh_config_dir);
466 sdata->mesh_config_dir = NULL;
467}
468#endif
469
286static void del_files(struct ieee80211_sub_if_data *sdata, int type) 470static void del_files(struct ieee80211_sub_if_data *sdata, int type)
287{ 471{
288 if (!sdata->debugfsdir) 472 if (!sdata->debugfsdir)
289 return; 473 return;
290 474
291 switch (type) { 475 switch (type) {
476 case IEEE80211_IF_TYPE_MESH_POINT:
477#ifdef CONFIG_MAC80211_MESH
478 del_mesh_stats(sdata);
479 del_mesh_config(sdata);
480#endif
481 /* fall through */
292 case IEEE80211_IF_TYPE_STA: 482 case IEEE80211_IF_TYPE_STA:
293 case IEEE80211_IF_TYPE_IBSS: 483 case IEEE80211_IF_TYPE_IBSS:
294 del_sta_files(sdata); 484 del_sta_files(sdata);
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 8f5944c53d4e..6d47a1d31b37 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -33,25 +33,16 @@ static ssize_t sta_ ##name## _read(struct file *file, \
33#define STA_READ_LU(name, field) STA_READ(name, 20, field, "%lu\n") 33#define STA_READ_LU(name, field) STA_READ(name, 20, field, "%lu\n")
34#define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n") 34#define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n")
35 35
36#define STA_READ_RATE(name, field) \ 36#define STA_OPS(name) \
37static ssize_t sta_##name##_read(struct file *file, \ 37static const struct file_operations sta_ ##name## _ops = { \
38 char __user *userbuf, \ 38 .read = sta_##name##_read, \
39 size_t count, loff_t *ppos) \ 39 .open = mac80211_open_file_generic, \
40{ \
41 struct sta_info *sta = file->private_data; \
42 struct ieee80211_local *local = wdev_priv(sta->dev->ieee80211_ptr);\
43 struct ieee80211_hw_mode *mode = local->oper_hw_mode; \
44 char buf[20]; \
45 int res = scnprintf(buf, sizeof(buf), "%d\n", \
46 (sta->field >= 0 && \
47 sta->field < mode->num_rates) ? \
48 mode->rates[sta->field].rate : -1); \
49 return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
50} 40}
51 41
52#define STA_OPS(name) \ 42#define STA_OPS_WR(name) \
53static const struct file_operations sta_ ##name## _ops = { \ 43static const struct file_operations sta_ ##name## _ops = { \
54 .read = sta_##name##_read, \ 44 .read = sta_##name##_read, \
45 .write = sta_##name##_write, \
55 .open = mac80211_open_file_generic, \ 46 .open = mac80211_open_file_generic, \
56} 47}
57 48
@@ -60,7 +51,7 @@ static const struct file_operations sta_ ##name## _ops = { \
60 STA_OPS(name) 51 STA_OPS(name)
61 52
62STA_FILE(aid, aid, D); 53STA_FILE(aid, aid, D);
63STA_FILE(dev, dev->name, S); 54STA_FILE(dev, sdata->dev->name, S);
64STA_FILE(rx_packets, rx_packets, LU); 55STA_FILE(rx_packets, rx_packets, LU);
65STA_FILE(tx_packets, tx_packets, LU); 56STA_FILE(tx_packets, tx_packets, LU);
66STA_FILE(rx_bytes, rx_bytes, LU); 57STA_FILE(rx_bytes, rx_bytes, LU);
@@ -70,27 +61,23 @@ STA_FILE(rx_fragments, rx_fragments, LU);
70STA_FILE(rx_dropped, rx_dropped, LU); 61STA_FILE(rx_dropped, rx_dropped, LU);
71STA_FILE(tx_fragments, tx_fragments, LU); 62STA_FILE(tx_fragments, tx_fragments, LU);
72STA_FILE(tx_filtered, tx_filtered_count, LU); 63STA_FILE(tx_filtered, tx_filtered_count, LU);
73STA_FILE(txrate, txrate, RATE);
74STA_FILE(last_txrate, last_txrate, RATE);
75STA_FILE(tx_retry_failed, tx_retry_failed, LU); 64STA_FILE(tx_retry_failed, tx_retry_failed, LU);
76STA_FILE(tx_retry_count, tx_retry_count, LU); 65STA_FILE(tx_retry_count, tx_retry_count, LU);
77STA_FILE(last_rssi, last_rssi, D); 66STA_FILE(last_rssi, last_rssi, D);
78STA_FILE(last_signal, last_signal, D); 67STA_FILE(last_signal, last_signal, D);
79STA_FILE(last_noise, last_noise, D); 68STA_FILE(last_noise, last_noise, D);
80STA_FILE(channel_use, channel_use, D); 69STA_FILE(channel_use, channel_use, D);
81STA_FILE(wep_weak_iv_count, wep_weak_iv_count, D); 70STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU);
82 71
83static ssize_t sta_flags_read(struct file *file, char __user *userbuf, 72static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
84 size_t count, loff_t *ppos) 73 size_t count, loff_t *ppos)
85{ 74{
86 char buf[100]; 75 char buf[100];
87 struct sta_info *sta = file->private_data; 76 struct sta_info *sta = file->private_data;
88 int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s", 77 int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s",
89 sta->flags & WLAN_STA_AUTH ? "AUTH\n" : "", 78 sta->flags & WLAN_STA_AUTH ? "AUTH\n" : "",
90 sta->flags & WLAN_STA_ASSOC ? "ASSOC\n" : "", 79 sta->flags & WLAN_STA_ASSOC ? "ASSOC\n" : "",
91 sta->flags & WLAN_STA_PS ? "PS\n" : "", 80 sta->flags & WLAN_STA_PS ? "PS\n" : "",
92 sta->flags & WLAN_STA_TIM ? "TIM\n" : "",
93 sta->flags & WLAN_STA_PERM ? "PERM\n" : "",
94 sta->flags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "", 81 sta->flags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "",
95 sta->flags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "", 82 sta->flags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "",
96 sta->flags & WLAN_STA_WME ? "WME\n" : "", 83 sta->flags & WLAN_STA_WME ? "WME\n" : "",
@@ -111,31 +98,6 @@ static ssize_t sta_num_ps_buf_frames_read(struct file *file,
111} 98}
112STA_OPS(num_ps_buf_frames); 99STA_OPS(num_ps_buf_frames);
113 100
114static ssize_t sta_last_ack_rssi_read(struct file *file, char __user *userbuf,
115 size_t count, loff_t *ppos)
116{
117 char buf[100];
118 struct sta_info *sta = file->private_data;
119 int res = scnprintf(buf, sizeof(buf), "%d %d %d\n",
120 sta->last_ack_rssi[0],
121 sta->last_ack_rssi[1],
122 sta->last_ack_rssi[2]);
123 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
124}
125STA_OPS(last_ack_rssi);
126
127static ssize_t sta_last_ack_ms_read(struct file *file, char __user *userbuf,
128 size_t count, loff_t *ppos)
129{
130 char buf[20];
131 struct sta_info *sta = file->private_data;
132 int res = scnprintf(buf, sizeof(buf), "%d\n",
133 sta->last_ack ?
134 jiffies_to_msecs(jiffies - sta->last_ack) : -1);
135 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
136}
137STA_OPS(last_ack_ms);
138
139static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf, 101static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf,
140 size_t count, loff_t *ppos) 102 size_t count, loff_t *ppos)
141{ 103{
@@ -191,8 +153,120 @@ static ssize_t sta_wme_tx_queue_read(struct file *file, char __user *userbuf,
191STA_OPS(wme_tx_queue); 153STA_OPS(wme_tx_queue);
192#endif 154#endif
193 155
156static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
157 size_t count, loff_t *ppos)
158{
159 char buf[768], *p = buf;
160 int i;
161 struct sta_info *sta = file->private_data;
162 p += scnprintf(p, sizeof(buf)+buf-p, "Agg state for STA is:\n");
163 p += scnprintf(p, sizeof(buf)+buf-p, " STA next dialog_token is %d \n "
164 "TIDs info is: \n TID :",
165 (sta->ampdu_mlme.dialog_token_allocator + 1));
166 for (i = 0; i < STA_TID_NUM; i++)
167 p += scnprintf(p, sizeof(buf)+buf-p, "%5d", i);
168
169 p += scnprintf(p, sizeof(buf)+buf-p, "\n RX :");
170 for (i = 0; i < STA_TID_NUM; i++)
171 p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
172 sta->ampdu_mlme.tid_state_rx[i]);
173
174 p += scnprintf(p, sizeof(buf)+buf-p, "\n DTKN:");
175 for (i = 0; i < STA_TID_NUM; i++)
176 p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
177 sta->ampdu_mlme.tid_state_rx[i]?
178 sta->ampdu_mlme.tid_rx[i]->dialog_token : 0);
179
180 p += scnprintf(p, sizeof(buf)+buf-p, "\n TX :");
181 for (i = 0; i < STA_TID_NUM; i++)
182 p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
183 sta->ampdu_mlme.tid_state_tx[i]);
184
185 p += scnprintf(p, sizeof(buf)+buf-p, "\n DTKN:");
186 for (i = 0; i < STA_TID_NUM; i++)
187 p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
188 sta->ampdu_mlme.tid_state_tx[i]?
189 sta->ampdu_mlme.tid_tx[i]->dialog_token : 0);
190
191 p += scnprintf(p, sizeof(buf)+buf-p, "\n SSN :");
192 for (i = 0; i < STA_TID_NUM; i++)
193 p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
194 sta->ampdu_mlme.tid_state_tx[i]?
195 sta->ampdu_mlme.tid_tx[i]->ssn : 0);
196
197 p += scnprintf(p, sizeof(buf)+buf-p, "\n");
198
199 return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
200}
201
202static ssize_t sta_agg_status_write(struct file *file,
203 const char __user *user_buf, size_t count, loff_t *ppos)
204{
205 struct sta_info *sta = file->private_data;
206 struct net_device *dev = sta->sdata->dev;
207 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
208 struct ieee80211_hw *hw = &local->hw;
209 u8 *da = sta->addr;
210 static int tid_static_tx[16] = {0, 0, 0, 0, 0, 0, 0, 0,
211 0, 0, 0, 0, 0, 0, 0, 0};
212 static int tid_static_rx[16] = {1, 1, 1, 1, 1, 1, 1, 1,
213 1, 1, 1, 1, 1, 1, 1, 1};
214 char *endp;
215 char buf[32];
216 int buf_size, rs;
217 unsigned int tid_num;
218 char state[4];
219
220 memset(buf, 0x00, sizeof(buf));
221 buf_size = min(count, (sizeof(buf)-1));
222 if (copy_from_user(buf, user_buf, buf_size))
223 return -EFAULT;
224
225 tid_num = simple_strtoul(buf, &endp, 0);
226 if (endp == buf)
227 return -EINVAL;
228
229 if ((tid_num >= 100) && (tid_num <= 115)) {
230 /* toggle Rx aggregation command */
231 tid_num = tid_num - 100;
232 if (tid_static_rx[tid_num] == 1) {
233 strcpy(state, "off ");
234 ieee80211_sta_stop_rx_ba_session(dev, da, tid_num, 0,
235 WLAN_REASON_QSTA_REQUIRE_SETUP);
236 sta->ampdu_mlme.tid_state_rx[tid_num] |=
237 HT_AGG_STATE_DEBUGFS_CTL;
238 tid_static_rx[tid_num] = 0;
239 } else {
240 strcpy(state, "on ");
241 sta->ampdu_mlme.tid_state_rx[tid_num] &=
242 ~HT_AGG_STATE_DEBUGFS_CTL;
243 tid_static_rx[tid_num] = 1;
244 }
245 printk(KERN_DEBUG "debugfs - try switching tid %u %s\n",
246 tid_num, state);
247 } else if ((tid_num >= 0) && (tid_num <= 15)) {
248 /* toggle Tx aggregation command */
249 if (tid_static_tx[tid_num] == 0) {
250 strcpy(state, "on ");
251 rs = ieee80211_start_tx_ba_session(hw, da, tid_num);
252 if (rs == 0)
253 tid_static_tx[tid_num] = 1;
254 } else {
255 strcpy(state, "off");
256 rs = ieee80211_stop_tx_ba_session(hw, da, tid_num, 1);
257 if (rs == 0)
258 tid_static_tx[tid_num] = 0;
259 }
260 printk(KERN_DEBUG "debugfs - switching tid %u %s, return=%d\n",
261 tid_num, state, rs);
262 }
263
264 return count;
265}
266STA_OPS_WR(agg_status);
267
194#define DEBUGFS_ADD(name) \ 268#define DEBUGFS_ADD(name) \
195 sta->debugfs.name = debugfs_create_file(#name, 0444, \ 269 sta->debugfs.name = debugfs_create_file(#name, 0400, \
196 sta->debugfs.dir, sta, &sta_ ##name## _ops); 270 sta->debugfs.dir, sta, &sta_ ##name## _ops);
197 271
198#define DEBUGFS_DEL(name) \ 272#define DEBUGFS_DEL(name) \
@@ -203,12 +277,13 @@ STA_OPS(wme_tx_queue);
203void ieee80211_sta_debugfs_add(struct sta_info *sta) 277void ieee80211_sta_debugfs_add(struct sta_info *sta)
204{ 278{
205 struct dentry *stations_dir = sta->local->debugfs.stations; 279 struct dentry *stations_dir = sta->local->debugfs.stations;
206 DECLARE_MAC_BUF(mac); 280 DECLARE_MAC_BUF(mbuf);
281 u8 *mac;
207 282
208 if (!stations_dir) 283 if (!stations_dir)
209 return; 284 return;
210 285
211 print_mac(mac, sta->addr); 286 mac = print_mac(mbuf, sta->addr);
212 287
213 sta->debugfs.dir = debugfs_create_dir(mac, stations_dir); 288 sta->debugfs.dir = debugfs_create_dir(mac, stations_dir);
214 if (!sta->debugfs.dir) 289 if (!sta->debugfs.dir)
@@ -216,28 +291,26 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
216 291
217 DEBUGFS_ADD(flags); 292 DEBUGFS_ADD(flags);
218 DEBUGFS_ADD(num_ps_buf_frames); 293 DEBUGFS_ADD(num_ps_buf_frames);
219 DEBUGFS_ADD(last_ack_rssi);
220 DEBUGFS_ADD(last_ack_ms);
221 DEBUGFS_ADD(inactive_ms); 294 DEBUGFS_ADD(inactive_ms);
222 DEBUGFS_ADD(last_seq_ctrl); 295 DEBUGFS_ADD(last_seq_ctrl);
223#ifdef CONFIG_MAC80211_DEBUG_COUNTERS 296#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
224 DEBUGFS_ADD(wme_rx_queue); 297 DEBUGFS_ADD(wme_rx_queue);
225 DEBUGFS_ADD(wme_tx_queue); 298 DEBUGFS_ADD(wme_tx_queue);
226#endif 299#endif
300 DEBUGFS_ADD(agg_status);
227} 301}
228 302
229void ieee80211_sta_debugfs_remove(struct sta_info *sta) 303void ieee80211_sta_debugfs_remove(struct sta_info *sta)
230{ 304{
231 DEBUGFS_DEL(flags); 305 DEBUGFS_DEL(flags);
232 DEBUGFS_DEL(num_ps_buf_frames); 306 DEBUGFS_DEL(num_ps_buf_frames);
233 DEBUGFS_DEL(last_ack_rssi);
234 DEBUGFS_DEL(last_ack_ms);
235 DEBUGFS_DEL(inactive_ms); 307 DEBUGFS_DEL(inactive_ms);
236 DEBUGFS_DEL(last_seq_ctrl); 308 DEBUGFS_DEL(last_seq_ctrl);
237#ifdef CONFIG_MAC80211_DEBUG_COUNTERS 309#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
238 DEBUGFS_DEL(wme_rx_queue); 310 DEBUGFS_DEL(wme_rx_queue);
239 DEBUGFS_DEL(wme_tx_queue); 311 DEBUGFS_DEL(wme_tx_queue);
240#endif 312#endif
313 DEBUGFS_DEL(agg_status);
241 314
242 debugfs_remove(sta->debugfs.dir); 315 debugfs_remove(sta->debugfs.dir);
243 sta->debugfs.dir = NULL; 316 sta->debugfs.dir = NULL;
diff --git a/net/mac80211/debugfs_sta.h b/net/mac80211/debugfs_sta.h
index 574a1cd54b96..8b608903259f 100644
--- a/net/mac80211/debugfs_sta.h
+++ b/net/mac80211/debugfs_sta.h
@@ -1,6 +1,8 @@
1#ifndef __MAC80211_DEBUGFS_STA_H 1#ifndef __MAC80211_DEBUGFS_STA_H
2#define __MAC80211_DEBUGFS_STA_H 2#define __MAC80211_DEBUGFS_STA_H
3 3
4#include "sta_info.h"
5
4#ifdef CONFIG_MAC80211_DEBUGFS 6#ifdef CONFIG_MAC80211_DEBUGFS
5void ieee80211_sta_debugfs_add(struct sta_info *sta); 7void ieee80211_sta_debugfs_add(struct sta_info *sta);
6void ieee80211_sta_debugfs_remove(struct sta_info *sta); 8void ieee80211_sta_debugfs_remove(struct sta_info *sta);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 72ecbf7bf962..8e53ce7ed444 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -23,7 +23,7 @@
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/etherdevice.h> 24#include <linux/etherdevice.h>
25#include <net/wireless.h> 25#include <net/wireless.h>
26#include "ieee80211_key.h" 26#include "key.h"
27#include "sta_info.h" 27#include "sta_info.h"
28 28
29/* ieee80211.o internal definitions, etc. These are not included into 29/* ieee80211.o internal definitions, etc. These are not included into
@@ -35,9 +35,9 @@
35 35
36#define WLAN_FC_DATA_PRESENT(fc) (((fc) & 0x4c) == 0x08) 36#define WLAN_FC_DATA_PRESENT(fc) (((fc) & 0x4c) == 0x08)
37 37
38struct ieee80211_local; 38#define IEEE80211_FC(type, subtype) cpu_to_le16(type | subtype)
39 39
40#define IEEE80211_ALIGN32_PAD(a) ((4 - ((a) & 3)) & 3) 40struct ieee80211_local;
41 41
42/* Maximum number of broadcast/multicast frames to buffer when some of the 42/* Maximum number of broadcast/multicast frames to buffer when some of the
43 * associated stations are using power saving. */ 43 * associated stations are using power saving. */
@@ -73,14 +73,14 @@ struct ieee80211_fragment_entry {
73struct ieee80211_sta_bss { 73struct ieee80211_sta_bss {
74 struct list_head list; 74 struct list_head list;
75 struct ieee80211_sta_bss *hnext; 75 struct ieee80211_sta_bss *hnext;
76 size_t ssid_len;
77
76 atomic_t users; 78 atomic_t users;
77 79
78 u8 bssid[ETH_ALEN]; 80 u8 bssid[ETH_ALEN];
79 u8 ssid[IEEE80211_MAX_SSID_LEN]; 81 u8 ssid[IEEE80211_MAX_SSID_LEN];
80 size_t ssid_len;
81 u16 capability; /* host byte order */ 82 u16 capability; /* host byte order */
82 int hw_mode; 83 enum ieee80211_band band;
83 int channel;
84 int freq; 84 int freq;
85 int rssi, signal, noise; 85 int rssi, signal, noise;
86 u8 *wpa_ie; 86 u8 *wpa_ie;
@@ -91,13 +91,18 @@ struct ieee80211_sta_bss {
91 size_t wmm_ie_len; 91 size_t wmm_ie_len;
92 u8 *ht_ie; 92 u8 *ht_ie;
93 size_t ht_ie_len; 93 size_t ht_ie_len;
94#ifdef CONFIG_MAC80211_MESH
95 u8 *mesh_id;
96 size_t mesh_id_len;
97 u8 *mesh_cfg;
98#endif
94#define IEEE80211_MAX_SUPP_RATES 32 99#define IEEE80211_MAX_SUPP_RATES 32
95 u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; 100 u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
96 size_t supp_rates_len; 101 size_t supp_rates_len;
97 int beacon_int;
98 u64 timestamp; 102 u64 timestamp;
103 int beacon_int;
99 104
100 int probe_resp; 105 bool probe_resp;
101 unsigned long last_update; 106 unsigned long last_update;
102 107
103 /* during assocation, we save an ERP value from a probe response so 108 /* during assocation, we save an ERP value from a probe response so
@@ -108,56 +113,98 @@ struct ieee80211_sta_bss {
108 u8 erp_value; 113 u8 erp_value;
109}; 114};
110 115
116static inline u8 *bss_mesh_cfg(struct ieee80211_sta_bss *bss)
117{
118#ifdef CONFIG_MAC80211_MESH
119 return bss->mesh_cfg;
120#endif
121 return NULL;
122}
111 123
112typedef enum { 124static inline u8 *bss_mesh_id(struct ieee80211_sta_bss *bss)
113 TXRX_CONTINUE, TXRX_DROP, TXRX_QUEUED 125{
114} ieee80211_txrx_result; 126#ifdef CONFIG_MAC80211_MESH
127 return bss->mesh_id;
128#endif
129 return NULL;
130}
115 131
116/* flags used in struct ieee80211_txrx_data.flags */ 132static inline u8 bss_mesh_id_len(struct ieee80211_sta_bss *bss)
117/* whether the MSDU was fragmented */ 133{
118#define IEEE80211_TXRXD_FRAGMENTED BIT(0) 134#ifdef CONFIG_MAC80211_MESH
119#define IEEE80211_TXRXD_TXUNICAST BIT(1) 135 return bss->mesh_id_len;
120#define IEEE80211_TXRXD_TXPS_BUFFERED BIT(2) 136#endif
121#define IEEE80211_TXRXD_TXPROBE_LAST_FRAG BIT(3) 137 return 0;
122#define IEEE80211_TXRXD_RXIN_SCAN BIT(4) 138}
123/* frame is destined to interface currently processed (incl. multicast frames) */ 139
124#define IEEE80211_TXRXD_RXRA_MATCH BIT(5) 140
125#define IEEE80211_TXRXD_TX_INJECTED BIT(6) 141typedef unsigned __bitwise__ ieee80211_tx_result;
126#define IEEE80211_TXRXD_RX_AMSDU BIT(7) 142#define TX_CONTINUE ((__force ieee80211_tx_result) 0u)
127struct ieee80211_txrx_data { 143#define TX_DROP ((__force ieee80211_tx_result) 1u)
144#define TX_QUEUED ((__force ieee80211_tx_result) 2u)
145
146#define IEEE80211_TX_FRAGMENTED BIT(0)
147#define IEEE80211_TX_UNICAST BIT(1)
148#define IEEE80211_TX_PS_BUFFERED BIT(2)
149#define IEEE80211_TX_PROBE_LAST_FRAG BIT(3)
150#define IEEE80211_TX_INJECTED BIT(4)
151
152struct ieee80211_tx_data {
128 struct sk_buff *skb; 153 struct sk_buff *skb;
129 struct net_device *dev; 154 struct net_device *dev;
130 struct ieee80211_local *local; 155 struct ieee80211_local *local;
131 struct ieee80211_sub_if_data *sdata; 156 struct ieee80211_sub_if_data *sdata;
132 struct sta_info *sta; 157 struct sta_info *sta;
158 struct ieee80211_key *key;
159
160 struct ieee80211_tx_control *control;
161 struct ieee80211_channel *channel;
162 struct ieee80211_rate *rate;
163 /* use this rate (if set) for last fragment; rate can
164 * be set to lower rate for the first fragments, e.g.,
165 * when using CTS protection with IEEE 802.11g. */
166 struct ieee80211_rate *last_frag_rate;
167
168 /* Extra fragments (in addition to the first fragment
169 * in skb) */
170 struct sk_buff **extra_frag;
171 int num_extra_frag;
172
133 u16 fc, ethertype; 173 u16 fc, ethertype;
174 unsigned int flags;
175};
176
177
178typedef unsigned __bitwise__ ieee80211_rx_result;
179#define RX_CONTINUE ((__force ieee80211_rx_result) 0u)
180#define RX_DROP_UNUSABLE ((__force ieee80211_rx_result) 1u)
181#define RX_DROP_MONITOR ((__force ieee80211_rx_result) 2u)
182#define RX_QUEUED ((__force ieee80211_rx_result) 3u)
183
184#define IEEE80211_RX_IN_SCAN BIT(0)
185/* frame is destined to interface currently processed (incl. multicast frames) */
186#define IEEE80211_RX_RA_MATCH BIT(1)
187#define IEEE80211_RX_AMSDU BIT(2)
188#define IEEE80211_RX_CMNTR_REPORTED BIT(3)
189#define IEEE80211_RX_FRAGMENTED BIT(4)
190
191struct ieee80211_rx_data {
192 struct sk_buff *skb;
193 struct net_device *dev;
194 struct ieee80211_local *local;
195 struct ieee80211_sub_if_data *sdata;
196 struct sta_info *sta;
134 struct ieee80211_key *key; 197 struct ieee80211_key *key;
198 struct ieee80211_rx_status *status;
199 struct ieee80211_rate *rate;
200
201 u16 fc, ethertype;
135 unsigned int flags; 202 unsigned int flags;
136 union { 203 int sent_ps_buffered;
137 struct { 204 int queue;
138 struct ieee80211_tx_control *control; 205 int load;
139 struct ieee80211_hw_mode *mode; 206 u32 tkip_iv32;
140 struct ieee80211_rate *rate; 207 u16 tkip_iv16;
141 /* use this rate (if set) for last fragment; rate can
142 * be set to lower rate for the first fragments, e.g.,
143 * when using CTS protection with IEEE 802.11g. */
144 struct ieee80211_rate *last_frag_rate;
145 int last_frag_hwrate;
146
147 /* Extra fragments (in addition to the first fragment
148 * in skb) */
149 int num_extra_frag;
150 struct sk_buff **extra_frag;
151 } tx;
152 struct {
153 struct ieee80211_rx_status *status;
154 int sent_ps_buffered;
155 int queue;
156 int load;
157 u32 tkip_iv32;
158 u16 tkip_iv16;
159 } rx;
160 } u;
161}; 208};
162 209
163/* flags used in struct ieee80211_tx_packet_data.flags */ 210/* flags used in struct ieee80211_tx_packet_data.flags */
@@ -165,6 +212,7 @@ struct ieee80211_txrx_data {
165#define IEEE80211_TXPD_DO_NOT_ENCRYPT BIT(1) 212#define IEEE80211_TXPD_DO_NOT_ENCRYPT BIT(1)
166#define IEEE80211_TXPD_REQUEUE BIT(2) 213#define IEEE80211_TXPD_REQUEUE BIT(2)
167#define IEEE80211_TXPD_EAPOL_FRAME BIT(3) 214#define IEEE80211_TXPD_EAPOL_FRAME BIT(3)
215#define IEEE80211_TXPD_AMPDU BIT(4)
168/* Stored in sk_buff->cb */ 216/* Stored in sk_buff->cb */
169struct ieee80211_tx_packet_data { 217struct ieee80211_tx_packet_data {
170 int ifindex; 218 int ifindex;
@@ -176,20 +224,12 @@ struct ieee80211_tx_packet_data {
176struct ieee80211_tx_stored_packet { 224struct ieee80211_tx_stored_packet {
177 struct ieee80211_tx_control control; 225 struct ieee80211_tx_control control;
178 struct sk_buff *skb; 226 struct sk_buff *skb;
179 int num_extra_frag;
180 struct sk_buff **extra_frag; 227 struct sk_buff **extra_frag;
181 int last_frag_rateidx;
182 int last_frag_hwrate;
183 struct ieee80211_rate *last_frag_rate; 228 struct ieee80211_rate *last_frag_rate;
229 int num_extra_frag;
184 unsigned int last_frag_rate_ctrl_probe; 230 unsigned int last_frag_rate_ctrl_probe;
185}; 231};
186 232
187typedef ieee80211_txrx_result (*ieee80211_tx_handler)
188(struct ieee80211_txrx_data *tx);
189
190typedef ieee80211_txrx_result (*ieee80211_rx_handler)
191(struct ieee80211_txrx_data *rx);
192
193struct beacon_data { 233struct beacon_data {
194 u8 *head, *tail; 234 u8 *head, *tail;
195 int head_len, tail_len; 235 int head_len, tail_len;
@@ -206,10 +246,10 @@ struct ieee80211_if_ap {
206 246
207 /* yes, this looks ugly, but guarantees that we can later use 247 /* yes, this looks ugly, but guarantees that we can later use
208 * bitmap_empty :) 248 * bitmap_empty :)
209 * NB: don't ever use set_bit, use bss_tim_set/bss_tim_clear! */ 249 * NB: don't touch this bitmap, use sta_info_{set,clear}_tim_bit */
210 u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)]; 250 u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)];
211 atomic_t num_sta_ps; /* number of stations in PS mode */
212 struct sk_buff_head ps_bc_buf; 251 struct sk_buff_head ps_bc_buf;
252 atomic_t num_sta_ps; /* number of stations in PS mode */
213 int dtim_count; 253 int dtim_count;
214 int force_unicast_rateidx; /* forced TX rateidx for unicast frames */ 254 int force_unicast_rateidx; /* forced TX rateidx for unicast frames */
215 int max_ratectrl_rateidx; /* max TX rateidx for rate control */ 255 int max_ratectrl_rateidx; /* max TX rateidx for rate control */
@@ -217,8 +257,8 @@ struct ieee80211_if_ap {
217}; 257};
218 258
219struct ieee80211_if_wds { 259struct ieee80211_if_wds {
220 u8 remote_addr[ETH_ALEN];
221 struct sta_info *sta; 260 struct sta_info *sta;
261 u8 remote_addr[ETH_ALEN];
222}; 262};
223 263
224struct ieee80211_if_vlan { 264struct ieee80211_if_vlan {
@@ -226,6 +266,41 @@ struct ieee80211_if_vlan {
226 struct list_head list; 266 struct list_head list;
227}; 267};
228 268
269struct mesh_stats {
270 __u32 fwded_frames; /* Mesh forwarded frames */
271 __u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/
272 __u32 dropped_frames_no_route; /* Not transmitted, no route found */
273 atomic_t estab_plinks;
274};
275
276#define PREQ_Q_F_START 0x1
277#define PREQ_Q_F_REFRESH 0x2
278struct mesh_preq_queue {
279 struct list_head list;
280 u8 dst[ETH_ALEN];
281 u8 flags;
282};
283
284struct mesh_config {
285 /* Timeouts in ms */
286 /* Mesh plink management parameters */
287 u16 dot11MeshRetryTimeout;
288 u16 dot11MeshConfirmTimeout;
289 u16 dot11MeshHoldingTimeout;
290 u16 dot11MeshMaxPeerLinks;
291 u8 dot11MeshMaxRetries;
292 u8 dot11MeshTTL;
293 bool auto_open_plinks;
294 /* HWMP parameters */
295 u8 dot11MeshHWMPmaxPREQretries;
296 u32 path_refresh_time;
297 u16 min_discovery_timeout;
298 u32 dot11MeshHWMPactivePathTimeout;
299 u16 dot11MeshHWMPpreqMinInterval;
300 u16 dot11MeshHWMPnetDiameterTraversalTime;
301};
302
303
229/* flags used in struct ieee80211_if_sta.flags */ 304/* flags used in struct ieee80211_if_sta.flags */
230#define IEEE80211_STA_SSID_SET BIT(0) 305#define IEEE80211_STA_SSID_SET BIT(0)
231#define IEEE80211_STA_BSSID_SET BIT(1) 306#define IEEE80211_STA_BSSID_SET BIT(1)
@@ -241,18 +316,47 @@ struct ieee80211_if_vlan {
241#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12) 316#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12)
242#define IEEE80211_STA_PRIVACY_INVOKED BIT(13) 317#define IEEE80211_STA_PRIVACY_INVOKED BIT(13)
243struct ieee80211_if_sta { 318struct ieee80211_if_sta {
244 enum {
245 IEEE80211_DISABLED, IEEE80211_AUTHENTICATE,
246 IEEE80211_ASSOCIATE, IEEE80211_ASSOCIATED,
247 IEEE80211_IBSS_SEARCH, IEEE80211_IBSS_JOINED
248 } state;
249 struct timer_list timer; 319 struct timer_list timer;
250 struct work_struct work; 320 struct work_struct work;
251 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; 321 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
252 u8 ssid[IEEE80211_MAX_SSID_LEN]; 322 u8 ssid[IEEE80211_MAX_SSID_LEN];
323 enum {
324 IEEE80211_DISABLED, IEEE80211_AUTHENTICATE,
325 IEEE80211_ASSOCIATE, IEEE80211_ASSOCIATED,
326 IEEE80211_IBSS_SEARCH, IEEE80211_IBSS_JOINED,
327 IEEE80211_MESH_UP
328 } state;
253 size_t ssid_len; 329 size_t ssid_len;
254 u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; 330 u8 scan_ssid[IEEE80211_MAX_SSID_LEN];
255 size_t scan_ssid_len; 331 size_t scan_ssid_len;
332#ifdef CONFIG_MAC80211_MESH
333 struct timer_list mesh_path_timer;
334 u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN];
335 size_t mesh_id_len;
336 /* Active Path Selection Protocol Identifier */
337 u8 mesh_pp_id[4];
338 /* Active Path Selection Metric Identifier */
339 u8 mesh_pm_id[4];
340 /* Congestion Control Mode Identifier */
341 u8 mesh_cc_id[4];
342 /* Local mesh Destination Sequence Number */
343 u32 dsn;
344 /* Last used PREQ ID */
345 u32 preq_id;
346 atomic_t mpaths;
347 /* Timestamp of last DSN update */
348 unsigned long last_dsn_update;
349 /* Timestamp of last DSN sent */
350 unsigned long last_preq;
351 struct mesh_rmc *rmc;
352 spinlock_t mesh_preq_queue_lock;
353 struct mesh_preq_queue preq_queue;
354 int preq_queue_len;
355 struct mesh_stats mshstats;
356 struct mesh_config mshcfg;
357 u8 mesh_seqnum[3];
358 bool accepting_plinks;
359#endif
256 u16 aid; 360 u16 aid;
257 u16 ap_capab, capab; 361 u16 ap_capab, capab;
258 u8 *extra_ie; /* to be added to the end of AssocReq */ 362 u8 *extra_ie; /* to be added to the end of AssocReq */
@@ -262,16 +366,18 @@ struct ieee80211_if_sta {
262 u8 *assocreq_ies, *assocresp_ies; 366 u8 *assocreq_ies, *assocresp_ies;
263 size_t assocreq_ies_len, assocresp_ies_len; 367 size_t assocreq_ies_len, assocresp_ies_len;
264 368
369 struct sk_buff_head skb_queue;
370
265 int auth_tries, assoc_tries; 371 int auth_tries, assoc_tries;
266 372
373 unsigned long request;
374
375 unsigned long last_probe;
376
267 unsigned int flags; 377 unsigned int flags;
268#define IEEE80211_STA_REQ_SCAN 0 378#define IEEE80211_STA_REQ_SCAN 0
269#define IEEE80211_STA_REQ_AUTH 1 379#define IEEE80211_STA_REQ_AUTH 1
270#define IEEE80211_STA_REQ_RUN 2 380#define IEEE80211_STA_REQ_RUN 2
271 unsigned long request;
272 struct sk_buff_head skb_queue;
273
274 unsigned long last_probe;
275 381
276#define IEEE80211_AUTH_ALG_OPEN BIT(0) 382#define IEEE80211_AUTH_ALG_OPEN BIT(0)
277#define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1) 383#define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1)
@@ -282,16 +388,34 @@ struct ieee80211_if_sta {
282 388
283 unsigned long ibss_join_req; 389 unsigned long ibss_join_req;
284 struct sk_buff *probe_resp; /* ProbeResp template for IBSS */ 390 struct sk_buff *probe_resp; /* ProbeResp template for IBSS */
285 u32 supp_rates_bits; 391 u32 supp_rates_bits[IEEE80211_NUM_BANDS];
286 392
287 int wmm_last_param_set; 393 int wmm_last_param_set;
394 int num_beacons; /* number of TXed beacon frames by this STA */
288}; 395};
289 396
397static inline void ieee80211_if_sta_set_mesh_id(struct ieee80211_if_sta *ifsta,
398 u8 mesh_id_len, u8 *mesh_id)
399{
400#ifdef CONFIG_MAC80211_MESH
401 ifsta->mesh_id_len = mesh_id_len;
402 memcpy(ifsta->mesh_id, mesh_id, mesh_id_len);
403#endif
404}
405
406#ifdef CONFIG_MAC80211_MESH
407#define IEEE80211_IFSTA_MESH_CTR_INC(sta, name) \
408 do { (sta)->mshstats.name++; } while (0)
409#else
410#define IEEE80211_IFSTA_MESH_CTR_INC(sta, name) \
411 do { } while (0)
412#endif
290 413
291/* flags used in struct ieee80211_sub_if_data.flags */ 414/* flags used in struct ieee80211_sub_if_data.flags */
292#define IEEE80211_SDATA_ALLMULTI BIT(0) 415#define IEEE80211_SDATA_ALLMULTI BIT(0)
293#define IEEE80211_SDATA_PROMISC BIT(1) 416#define IEEE80211_SDATA_PROMISC BIT(1)
294#define IEEE80211_SDATA_USERSPACE_MLME BIT(2) 417#define IEEE80211_SDATA_USERSPACE_MLME BIT(2)
418#define IEEE80211_SDATA_OPERATING_GMODE BIT(3)
295struct ieee80211_sub_if_data { 419struct ieee80211_sub_if_data {
296 struct list_head list; 420 struct list_head list;
297 421
@@ -306,11 +430,11 @@ struct ieee80211_sub_if_data {
306 unsigned int flags; 430 unsigned int flags;
307 431
308 int drop_unencrypted; 432 int drop_unencrypted;
433
309 /* 434 /*
310 * IEEE 802.1X Port access control in effect, 435 * basic rates of this AP or the AP we're associated to
311 * drop packets to/from unauthorized port
312 */ 436 */
313 int ieee802_1x_pac; 437 u64 basic_rates;
314 438
315 u16 sequence; 439 u16 sequence;
316 440
@@ -338,6 +462,7 @@ struct ieee80211_sub_if_data {
338 struct ieee80211_if_wds wds; 462 struct ieee80211_if_wds wds;
339 struct ieee80211_if_vlan vlan; 463 struct ieee80211_if_vlan vlan;
340 struct ieee80211_if_sta sta; 464 struct ieee80211_if_sta sta;
465 u32 mntr_flags;
341 } u; 466 } u;
342 int channel_use; 467 int channel_use;
343 int channel_use_raw; 468 int channel_use_raw;
@@ -348,7 +473,6 @@ struct ieee80211_sub_if_data {
348 struct { 473 struct {
349 struct dentry *channel_use; 474 struct dentry *channel_use;
350 struct dentry *drop_unencrypted; 475 struct dentry *drop_unencrypted;
351 struct dentry *ieee802_1x_pac;
352 struct dentry *state; 476 struct dentry *state;
353 struct dentry *bssid; 477 struct dentry *bssid;
354 struct dentry *prev_bssid; 478 struct dentry *prev_bssid;
@@ -363,11 +487,11 @@ struct ieee80211_sub_if_data {
363 struct dentry *auth_alg; 487 struct dentry *auth_alg;
364 struct dentry *auth_transaction; 488 struct dentry *auth_transaction;
365 struct dentry *flags; 489 struct dentry *flags;
490 struct dentry *num_beacons_sta;
366 } sta; 491 } sta;
367 struct { 492 struct {
368 struct dentry *channel_use; 493 struct dentry *channel_use;
369 struct dentry *drop_unencrypted; 494 struct dentry *drop_unencrypted;
370 struct dentry *ieee802_1x_pac;
371 struct dentry *num_sta_ps; 495 struct dentry *num_sta_ps;
372 struct dentry *dtim_count; 496 struct dentry *dtim_count;
373 struct dentry *num_beacons; 497 struct dentry *num_beacons;
@@ -378,19 +502,46 @@ struct ieee80211_sub_if_data {
378 struct { 502 struct {
379 struct dentry *channel_use; 503 struct dentry *channel_use;
380 struct dentry *drop_unencrypted; 504 struct dentry *drop_unencrypted;
381 struct dentry *ieee802_1x_pac;
382 struct dentry *peer; 505 struct dentry *peer;
383 } wds; 506 } wds;
384 struct { 507 struct {
385 struct dentry *channel_use; 508 struct dentry *channel_use;
386 struct dentry *drop_unencrypted; 509 struct dentry *drop_unencrypted;
387 struct dentry *ieee802_1x_pac;
388 } vlan; 510 } vlan;
389 struct { 511 struct {
390 struct dentry *mode; 512 struct dentry *mode;
391 } monitor; 513 } monitor;
392 struct dentry *default_key; 514 struct dentry *default_key;
393 } debugfs; 515 } debugfs;
516
517#ifdef CONFIG_MAC80211_MESH
518 struct dentry *mesh_stats_dir;
519 struct {
520 struct dentry *fwded_frames;
521 struct dentry *dropped_frames_ttl;
522 struct dentry *dropped_frames_no_route;
523 struct dentry *estab_plinks;
524 struct timer_list mesh_path_timer;
525 } mesh_stats;
526
527 struct dentry *mesh_config_dir;
528 struct {
529 struct dentry *dot11MeshRetryTimeout;
530 struct dentry *dot11MeshConfirmTimeout;
531 struct dentry *dot11MeshHoldingTimeout;
532 struct dentry *dot11MeshMaxRetries;
533 struct dentry *dot11MeshTTL;
534 struct dentry *auto_open_plinks;
535 struct dentry *dot11MeshMaxPeerLinks;
536 struct dentry *dot11MeshHWMPactivePathTimeout;
537 struct dentry *dot11MeshHWMPpreqMinInterval;
538 struct dentry *dot11MeshHWMPnetDiameterTraversalTime;
539 struct dentry *dot11MeshHWMPmaxPREQretries;
540 struct dentry *path_refresh_time;
541 struct dentry *min_discovery_timeout;
542 } mesh_config;
543#endif
544
394#endif 545#endif
395 /* must be last, dynamically sized area in this! */ 546 /* must be last, dynamically sized area in this! */
396 struct ieee80211_vif vif; 547 struct ieee80211_vif vif;
@@ -407,6 +558,8 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
407enum { 558enum {
408 IEEE80211_RX_MSG = 1, 559 IEEE80211_RX_MSG = 1,
409 IEEE80211_TX_STATUS_MSG = 2, 560 IEEE80211_TX_STATUS_MSG = 2,
561 IEEE80211_DELBA_MSG = 3,
562 IEEE80211_ADDBA_MSG = 4,
410}; 563};
411 564
412struct ieee80211_local { 565struct ieee80211_local {
@@ -417,15 +570,15 @@ struct ieee80211_local {
417 570
418 const struct ieee80211_ops *ops; 571 const struct ieee80211_ops *ops;
419 572
420 /* List of registered struct ieee80211_hw_mode */
421 struct list_head modes_list;
422
423 struct net_device *mdev; /* wmaster# - "master" 802.11 device */ 573 struct net_device *mdev; /* wmaster# - "master" 802.11 device */
424 int open_count; 574 int open_count;
425 int monitors; 575 int monitors, cooked_mntrs;
576 /* number of interfaces with corresponding FIF_ flags */
577 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss;
426 unsigned int filter_flags; /* FIF_* */ 578 unsigned int filter_flags; /* FIF_* */
427 struct iw_statistics wstats; 579 struct iw_statistics wstats;
428 u8 wstats_flags; 580 u8 wstats_flags;
581 bool tim_in_locked_section; /* see ieee80211_beacon_get() */
429 int tx_headroom; /* required headroom for hardware/radiotap */ 582 int tx_headroom; /* required headroom for hardware/radiotap */
430 583
431 enum { 584 enum {
@@ -443,15 +596,22 @@ struct ieee80211_local {
443 struct sk_buff_head skb_queue; 596 struct sk_buff_head skb_queue;
444 struct sk_buff_head skb_queue_unreliable; 597 struct sk_buff_head skb_queue_unreliable;
445 598
446 /* Station data structures */ 599 /* Station data */
447 rwlock_t sta_lock; /* protects STA data structures */ 600 /*
448 int num_sta; /* number of stations in sta_list */ 601 * The lock only protects the list, hash, timer and counter
602 * against manipulation, reads are done in RCU. Additionally,
603 * the lock protects each BSS's TIM bitmap.
604 */
605 spinlock_t sta_lock;
606 unsigned long num_sta;
449 struct list_head sta_list; 607 struct list_head sta_list;
608 struct list_head sta_flush_list;
609 struct work_struct sta_flush_work;
450 struct sta_info *sta_hash[STA_HASH_SIZE]; 610 struct sta_info *sta_hash[STA_HASH_SIZE];
451 struct timer_list sta_cleanup; 611 struct timer_list sta_cleanup;
452 612
453 unsigned long state[NUM_TX_DATA_QUEUES]; 613 unsigned long state[NUM_TX_DATA_QUEUES_AMPDU];
454 struct ieee80211_tx_stored_packet pending_packet[NUM_TX_DATA_QUEUES]; 614 struct ieee80211_tx_stored_packet pending_packet[NUM_TX_DATA_QUEUES_AMPDU];
455 struct tasklet_struct tx_pending_tasklet; 615 struct tasklet_struct tx_pending_tasklet;
456 616
457 /* number of interfaces with corresponding IFF_ flags */ 617 /* number of interfaces with corresponding IFF_ flags */
@@ -459,11 +619,6 @@ struct ieee80211_local {
459 619
460 struct rate_control_ref *rate_ctrl; 620 struct rate_control_ref *rate_ctrl;
461 621
462 /* Supported and basic rate filters for different modes. These are
463 * pointers to -1 terminated lists and rates in 100 kbps units. */
464 int *supp_rates[NUM_IEEE80211_MODES];
465 int *basic_rates[NUM_IEEE80211_MODES];
466
467 int rts_threshold; 622 int rts_threshold;
468 int fragmentation_threshold; 623 int fragmentation_threshold;
469 int short_retry_limit; /* dot11ShortRetryLimit */ 624 int short_retry_limit; /* dot11ShortRetryLimit */
@@ -477,21 +632,25 @@ struct ieee80211_local {
477 * deliver multicast frames both back to wireless 632 * deliver multicast frames both back to wireless
478 * media and to the local net stack */ 633 * media and to the local net stack */
479 634
480 ieee80211_rx_handler *rx_pre_handlers;
481 ieee80211_rx_handler *rx_handlers;
482 ieee80211_tx_handler *tx_handlers;
483
484 struct list_head interfaces; 635 struct list_head interfaces;
485 636
637 /*
638 * Key lock, protects sdata's key_list and sta_info's
639 * key pointers (write access, they're RCU.)
640 */
641 spinlock_t key_lock;
642
643
486 bool sta_sw_scanning; 644 bool sta_sw_scanning;
487 bool sta_hw_scanning; 645 bool sta_hw_scanning;
488 int scan_channel_idx; 646 int scan_channel_idx;
647 enum ieee80211_band scan_band;
648
489 enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state; 649 enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state;
490 unsigned long last_scan_completed; 650 unsigned long last_scan_completed;
491 struct delayed_work scan_work; 651 struct delayed_work scan_work;
492 struct net_device *scan_dev; 652 struct net_device *scan_dev;
493 struct ieee80211_channel *oper_channel, *scan_channel; 653 struct ieee80211_channel *oper_channel, *scan_channel;
494 struct ieee80211_hw_mode *oper_hw_mode, *scan_hw_mode;
495 u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; 654 u8 scan_ssid[IEEE80211_MAX_SSID_LEN];
496 size_t scan_ssid_len; 655 size_t scan_ssid_len;
497 struct list_head sta_bss_list; 656 struct list_head sta_bss_list;
@@ -560,14 +719,8 @@ struct ieee80211_local {
560 int wifi_wme_noack_test; 719 int wifi_wme_noack_test;
561 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ 720 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
562 721
563 unsigned int enabled_modes; /* bitfield of allowed modes;
564 * (1 << MODE_*) */
565 unsigned int hw_modes; /* bitfield of supported hardware modes;
566 * (1 << MODE_*) */
567
568#ifdef CONFIG_MAC80211_DEBUGFS 722#ifdef CONFIG_MAC80211_DEBUGFS
569 struct local_debugfsdentries { 723 struct local_debugfsdentries {
570 struct dentry *channel;
571 struct dentry *frequency; 724 struct dentry *frequency;
572 struct dentry *antenna_sel_tx; 725 struct dentry *antenna_sel_tx;
573 struct dentry *antenna_sel_rx; 726 struct dentry *antenna_sel_rx;
@@ -577,9 +730,7 @@ struct ieee80211_local {
577 struct dentry *short_retry_limit; 730 struct dentry *short_retry_limit;
578 struct dentry *long_retry_limit; 731 struct dentry *long_retry_limit;
579 struct dentry *total_ps_buffered; 732 struct dentry *total_ps_buffered;
580 struct dentry *mode;
581 struct dentry *wep_iv; 733 struct dentry *wep_iv;
582 struct dentry *modes;
583 struct dentry *statistics; 734 struct dentry *statistics;
584 struct local_debugfsdentries_statsdentries { 735 struct local_debugfsdentries_statsdentries {
585 struct dentry *transmitted_fragment_count; 736 struct dentry *transmitted_fragment_count;
@@ -627,6 +778,63 @@ struct ieee80211_local {
627#endif 778#endif
628}; 779};
629 780
781/* this struct represents 802.11n's RA/TID combination */
782struct ieee80211_ra_tid {
783 u8 ra[ETH_ALEN];
784 u16 tid;
785};
786
787/* Parsed Information Elements */
788struct ieee802_11_elems {
789 /* pointers to IEs */
790 u8 *ssid;
791 u8 *supp_rates;
792 u8 *fh_params;
793 u8 *ds_params;
794 u8 *cf_params;
795 u8 *tim;
796 u8 *ibss_params;
797 u8 *challenge;
798 u8 *wpa;
799 u8 *rsn;
800 u8 *erp_info;
801 u8 *ext_supp_rates;
802 u8 *wmm_info;
803 u8 *wmm_param;
804 u8 *ht_cap_elem;
805 u8 *ht_info_elem;
806 u8 *mesh_config;
807 u8 *mesh_id;
808 u8 *peer_link;
809 u8 *preq;
810 u8 *prep;
811 u8 *perr;
812
813 /* length of them, respectively */
814 u8 ssid_len;
815 u8 supp_rates_len;
816 u8 fh_params_len;
817 u8 ds_params_len;
818 u8 cf_params_len;
819 u8 tim_len;
820 u8 ibss_params_len;
821 u8 challenge_len;
822 u8 wpa_len;
823 u8 rsn_len;
824 u8 erp_info_len;
825 u8 ext_supp_rates_len;
826 u8 wmm_info_len;
827 u8 wmm_param_len;
828 u8 ht_cap_elem_len;
829 u8 ht_info_elem_len;
830 u8 mesh_config_len;
831 u8 mesh_id_len;
832 u8 peer_link_len;
833 u8 preq_len;
834 u8 prep_len;
835 u8 perr_len;
836};
837
630static inline struct ieee80211_local *hw_to_local( 838static inline struct ieee80211_local *hw_to_local(
631 struct ieee80211_hw *hw) 839 struct ieee80211_hw *hw)
632{ 840{
@@ -650,57 +858,6 @@ struct sta_attribute {
650 ssize_t (*store)(struct sta_info *, const char *buf, size_t count); 858 ssize_t (*store)(struct sta_info *, const char *buf, size_t count);
651}; 859};
652 860
653static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid)
654{
655 /*
656 * This format has been mandated by the IEEE specifications,
657 * so this line may not be changed to use the __set_bit() format.
658 */
659 bss->tim[aid / 8] |= (1 << (aid % 8));
660}
661
662static inline void bss_tim_set(struct ieee80211_local *local,
663 struct ieee80211_if_ap *bss, u16 aid)
664{
665 read_lock_bh(&local->sta_lock);
666 __bss_tim_set(bss, aid);
667 read_unlock_bh(&local->sta_lock);
668}
669
670static inline void __bss_tim_clear(struct ieee80211_if_ap *bss, u16 aid)
671{
672 /*
673 * This format has been mandated by the IEEE specifications,
674 * so this line may not be changed to use the __clear_bit() format.
675 */
676 bss->tim[aid / 8] &= ~(1 << (aid % 8));
677}
678
679static inline void bss_tim_clear(struct ieee80211_local *local,
680 struct ieee80211_if_ap *bss, u16 aid)
681{
682 read_lock_bh(&local->sta_lock);
683 __bss_tim_clear(bss, aid);
684 read_unlock_bh(&local->sta_lock);
685}
686
687/**
688 * ieee80211_is_erp_rate - Check if a rate is an ERP rate
689 * @phymode: The PHY-mode for this rate (MODE_IEEE80211...)
690 * @rate: Transmission rate to check, in 100 kbps
691 *
692 * Check if a given rate is an Extended Rate PHY (ERP) rate.
693 */
694static inline int ieee80211_is_erp_rate(int phymode, int rate)
695{
696 if (phymode == MODE_IEEE80211G) {
697 if (rate != 10 && rate != 20 &&
698 rate != 55 && rate != 110)
699 return 1;
700 }
701 return 0;
702}
703
704static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) 861static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
705{ 862{
706 return compare_ether_addr(raddr, addr) == 0 || 863 return compare_ether_addr(raddr, addr) == 0 ||
@@ -712,16 +869,11 @@ static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
712int ieee80211_hw_config(struct ieee80211_local *local); 869int ieee80211_hw_config(struct ieee80211_local *local);
713int ieee80211_if_config(struct net_device *dev); 870int ieee80211_if_config(struct net_device *dev);
714int ieee80211_if_config_beacon(struct net_device *dev); 871int ieee80211_if_config_beacon(struct net_device *dev);
715void ieee80211_prepare_rates(struct ieee80211_local *local, 872void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx);
716 struct ieee80211_hw_mode *mode);
717void ieee80211_tx_set_iswep(struct ieee80211_txrx_data *tx);
718int ieee80211_if_update_wds(struct net_device *dev, u8 *remote_addr);
719void ieee80211_if_setup(struct net_device *dev); 873void ieee80211_if_setup(struct net_device *dev);
720struct ieee80211_rate *ieee80211_get_rate(struct ieee80211_local *local, 874u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
721 int phymode, int hwrate); 875 struct ieee80211_ht_info *req_ht_cap,
722int ieee80211_hw_config_ht(struct ieee80211_local *local, int enable_ht, 876 struct ieee80211_ht_bss_info *req_bss_cap);
723 struct ieee80211_ht_info *req_ht_cap,
724 struct ieee80211_ht_bss_info *req_bss_cap);
725 877
726/* ieee80211_ioctl.c */ 878/* ieee80211_ioctl.c */
727extern const struct iw_handler_def ieee80211_iw_handler_def; 879extern const struct iw_handler_def ieee80211_iw_handler_def;
@@ -747,9 +899,7 @@ extern const struct iw_handler_def ieee80211_iw_handler_def;
747 899
748 900
749/* ieee80211_ioctl.c */ 901/* ieee80211_ioctl.c */
750int ieee80211_set_compression(struct ieee80211_local *local, 902int ieee80211_set_freq(struct ieee80211_local *local, int freq);
751 struct net_device *dev, struct sta_info *sta);
752int ieee80211_set_channel(struct ieee80211_local *local, int channel, int freq);
753/* ieee80211_sta.c */ 903/* ieee80211_sta.c */
754void ieee80211_sta_timer(unsigned long data); 904void ieee80211_sta_timer(unsigned long data);
755void ieee80211_sta_work(struct work_struct *work); 905void ieee80211_sta_work(struct work_struct *work);
@@ -763,9 +913,9 @@ int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len);
763void ieee80211_sta_req_auth(struct net_device *dev, 913void ieee80211_sta_req_auth(struct net_device *dev,
764 struct ieee80211_if_sta *ifsta); 914 struct ieee80211_if_sta *ifsta);
765int ieee80211_sta_scan_results(struct net_device *dev, char *buf, size_t len); 915int ieee80211_sta_scan_results(struct net_device *dev, char *buf, size_t len);
766ieee80211_txrx_result ieee80211_sta_rx_scan(struct net_device *dev, 916ieee80211_rx_result ieee80211_sta_rx_scan(
767 struct sk_buff *skb, 917 struct net_device *dev, struct sk_buff *skb,
768 struct ieee80211_rx_status *rx_status); 918 struct ieee80211_rx_status *rx_status);
769void ieee80211_rx_bss_list_init(struct net_device *dev); 919void ieee80211_rx_bss_list_init(struct net_device *dev);
770void ieee80211_rx_bss_list_deinit(struct net_device *dev); 920void ieee80211_rx_bss_list_deinit(struct net_device *dev);
771int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len); 921int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len);
@@ -782,12 +932,36 @@ int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie,
782int ieee80211_ht_addt_info_ie_to_ht_bss_info( 932int ieee80211_ht_addt_info_ie_to_ht_bss_info(
783 struct ieee80211_ht_addt_info *ht_add_info_ie, 933 struct ieee80211_ht_addt_info *ht_add_info_ie,
784 struct ieee80211_ht_bss_info *bss_info); 934 struct ieee80211_ht_bss_info *bss_info);
935void ieee80211_send_addba_request(struct net_device *dev, const u8 *da,
936 u16 tid, u8 dialog_token, u16 start_seq_num,
937 u16 agg_size, u16 timeout);
938void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid,
939 u16 initiator, u16 reason_code);
940
785void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *da, 941void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *da,
786 u16 tid, u16 initiator, u16 reason); 942 u16 tid, u16 initiator, u16 reason);
787void sta_rx_agg_session_timer_expired(unsigned long data); 943void sta_rx_agg_session_timer_expired(unsigned long data);
944void sta_addba_resp_timer_expired(unsigned long data);
945void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr);
946u64 ieee80211_sta_get_rates(struct ieee80211_local *local,
947 struct ieee802_11_elems *elems,
948 enum ieee80211_band band);
949void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb,
950 int encrypt);
951void ieee802_11_parse_elems(u8 *start, size_t len,
952 struct ieee802_11_elems *elems);
953
954#ifdef CONFIG_MAC80211_MESH
955void ieee80211_start_mesh(struct net_device *dev);
956#else
957static inline void ieee80211_start_mesh(struct net_device *dev)
958{}
959#endif
960
788/* ieee80211_iface.c */ 961/* ieee80211_iface.c */
789int ieee80211_if_add(struct net_device *dev, const char *name, 962int ieee80211_if_add(struct net_device *dev, const char *name,
790 struct net_device **new_dev, int type); 963 struct net_device **new_dev, int type,
964 struct vif_params *params);
791void ieee80211_if_set_type(struct net_device *dev, int type); 965void ieee80211_if_set_type(struct net_device *dev, int type);
792void ieee80211_if_reinit(struct net_device *dev); 966void ieee80211_if_reinit(struct net_device *dev);
793void __ieee80211_if_del(struct ieee80211_local *local, 967void __ieee80211_if_del(struct ieee80211_local *local,
@@ -796,16 +970,7 @@ int ieee80211_if_remove(struct net_device *dev, const char *name, int id);
796void ieee80211_if_free(struct net_device *dev); 970void ieee80211_if_free(struct net_device *dev);
797void ieee80211_if_sdata_init(struct ieee80211_sub_if_data *sdata); 971void ieee80211_if_sdata_init(struct ieee80211_sub_if_data *sdata);
798 972
799/* regdomain.c */
800void ieee80211_regdomain_init(void);
801void ieee80211_set_default_regdomain(struct ieee80211_hw_mode *mode);
802
803/* rx handling */
804extern ieee80211_rx_handler ieee80211_rx_pre_handlers[];
805extern ieee80211_rx_handler ieee80211_rx_handlers[];
806
807/* tx handling */ 973/* tx handling */
808extern ieee80211_tx_handler ieee80211_tx_handlers[];
809void ieee80211_clear_tx_pending(struct ieee80211_local *local); 974void ieee80211_clear_tx_pending(struct ieee80211_local *local);
810void ieee80211_tx_pending(unsigned long data); 975void ieee80211_tx_pending(unsigned long data);
811int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev); 976int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev);
diff --git a/net/mac80211/ieee80211_iface.c b/net/mac80211/iface.c
index 92f1eb2da311..80954a512185 100644
--- a/net/mac80211/ieee80211_iface.c
+++ b/net/mac80211/iface.c
@@ -15,6 +15,7 @@
15#include "ieee80211_i.h" 15#include "ieee80211_i.h"
16#include "sta_info.h" 16#include "sta_info.h"
17#include "debugfs_netdev.h" 17#include "debugfs_netdev.h"
18#include "mesh.h"
18 19
19void ieee80211_if_sdata_init(struct ieee80211_sub_if_data *sdata) 20void ieee80211_if_sdata_init(struct ieee80211_sub_if_data *sdata)
20{ 21{
@@ -39,7 +40,8 @@ static void ieee80211_if_sdata_deinit(struct ieee80211_sub_if_data *sdata)
39 40
40/* Must be called with rtnl lock held. */ 41/* Must be called with rtnl lock held. */
41int ieee80211_if_add(struct net_device *dev, const char *name, 42int ieee80211_if_add(struct net_device *dev, const char *name,
42 struct net_device **new_dev, int type) 43 struct net_device **new_dev, int type,
44 struct vif_params *params)
43{ 45{
44 struct net_device *ndev; 46 struct net_device *ndev;
45 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 47 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
@@ -78,6 +80,12 @@ int ieee80211_if_add(struct net_device *dev, const char *name,
78 ieee80211_debugfs_add_netdev(sdata); 80 ieee80211_debugfs_add_netdev(sdata);
79 ieee80211_if_set_type(ndev, type); 81 ieee80211_if_set_type(ndev, type);
80 82
83 if (ieee80211_vif_is_mesh(&sdata->vif) &&
84 params && params->mesh_id_len)
85 ieee80211_if_sta_set_mesh_id(&sdata->u.sta,
86 params->mesh_id_len,
87 params->mesh_id);
88
81 /* we're under RTNL so all this is fine */ 89 /* we're under RTNL so all this is fine */
82 if (unlikely(local->reg_state == IEEE80211_DEV_UNREGISTERED)) { 90 if (unlikely(local->reg_state == IEEE80211_DEV_UNREGISTERED)) {
83 __ieee80211_if_del(local, sdata); 91 __ieee80211_if_del(local, sdata);
@@ -118,6 +126,8 @@ void ieee80211_if_set_type(struct net_device *dev, int type)
118 sdata->bss = NULL; 126 sdata->bss = NULL;
119 sdata->vif.type = type; 127 sdata->vif.type = type;
120 128
129 sdata->basic_rates = 0;
130
121 switch (type) { 131 switch (type) {
122 case IEEE80211_IF_TYPE_WDS: 132 case IEEE80211_IF_TYPE_WDS:
123 /* nothing special */ 133 /* nothing special */
@@ -132,6 +142,7 @@ void ieee80211_if_set_type(struct net_device *dev, int type)
132 sdata->bss = &sdata->u.ap; 142 sdata->bss = &sdata->u.ap;
133 INIT_LIST_HEAD(&sdata->u.ap.vlans); 143 INIT_LIST_HEAD(&sdata->u.ap.vlans);
134 break; 144 break;
145 case IEEE80211_IF_TYPE_MESH_POINT:
135 case IEEE80211_IF_TYPE_STA: 146 case IEEE80211_IF_TYPE_STA:
136 case IEEE80211_IF_TYPE_IBSS: { 147 case IEEE80211_IF_TYPE_IBSS: {
137 struct ieee80211_sub_if_data *msdata; 148 struct ieee80211_sub_if_data *msdata;
@@ -153,15 +164,20 @@ void ieee80211_if_set_type(struct net_device *dev, int type)
153 164
154 msdata = IEEE80211_DEV_TO_SUB_IF(sdata->local->mdev); 165 msdata = IEEE80211_DEV_TO_SUB_IF(sdata->local->mdev);
155 sdata->bss = &msdata->u.ap; 166 sdata->bss = &msdata->u.ap;
167
168 if (ieee80211_vif_is_mesh(&sdata->vif))
169 ieee80211_mesh_init_sdata(sdata);
156 break; 170 break;
157 } 171 }
158 case IEEE80211_IF_TYPE_MNTR: 172 case IEEE80211_IF_TYPE_MNTR:
159 dev->type = ARPHRD_IEEE80211_RADIOTAP; 173 dev->type = ARPHRD_IEEE80211_RADIOTAP;
160 dev->hard_start_xmit = ieee80211_monitor_start_xmit; 174 dev->hard_start_xmit = ieee80211_monitor_start_xmit;
175 sdata->u.mntr_flags = MONITOR_FLAG_CONTROL |
176 MONITOR_FLAG_OTHER_BSS;
161 break; 177 break;
162 default: 178 default:
163 printk(KERN_WARNING "%s: %s: Unknown interface type 0x%x", 179 printk(KERN_WARNING "%s: %s: Unknown interface type 0x%x",
164 dev->name, __FUNCTION__, type); 180 dev->name, __func__, type);
165 } 181 }
166 ieee80211_debugfs_change_if_type(sdata, oldtype); 182 ieee80211_debugfs_change_if_type(sdata, oldtype);
167} 183}
@@ -171,8 +187,8 @@ void ieee80211_if_reinit(struct net_device *dev)
171{ 187{
172 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 188 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
173 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 189 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
174 struct sta_info *sta;
175 struct sk_buff *skb; 190 struct sk_buff *skb;
191 int flushed;
176 192
177 ASSERT_RTNL(); 193 ASSERT_RTNL();
178 194
@@ -180,6 +196,10 @@ void ieee80211_if_reinit(struct net_device *dev)
180 196
181 ieee80211_if_sdata_deinit(sdata); 197 ieee80211_if_sdata_deinit(sdata);
182 198
199 /* Need to handle mesh specially to allow eliding the function call */
200 if (ieee80211_vif_is_mesh(&sdata->vif))
201 mesh_rmc_free(dev);
202
183 switch (sdata->vif.type) { 203 switch (sdata->vif.type) {
184 case IEEE80211_IF_TYPE_INVALID: 204 case IEEE80211_IF_TYPE_INVALID:
185 /* cannot happen */ 205 /* cannot happen */
@@ -189,6 +209,7 @@ void ieee80211_if_reinit(struct net_device *dev)
189 /* Remove all virtual interfaces that use this BSS 209 /* Remove all virtual interfaces that use this BSS
190 * as their sdata->bss */ 210 * as their sdata->bss */
191 struct ieee80211_sub_if_data *tsdata, *n; 211 struct ieee80211_sub_if_data *tsdata, *n;
212 struct beacon_data *beacon;
192 213
193 list_for_each_entry_safe(tsdata, n, &local->interfaces, list) { 214 list_for_each_entry_safe(tsdata, n, &local->interfaces, list) {
194 if (tsdata != sdata && tsdata->bss == &sdata->u.ap) { 215 if (tsdata != sdata && tsdata->bss == &sdata->u.ap) {
@@ -206,7 +227,10 @@ void ieee80211_if_reinit(struct net_device *dev)
206 } 227 }
207 } 228 }
208 229
209 kfree(sdata->u.ap.beacon); 230 beacon = sdata->u.ap.beacon;
231 rcu_assign_pointer(sdata->u.ap.beacon, NULL);
232 synchronize_rcu();
233 kfree(beacon);
210 234
211 while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) { 235 while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) {
212 local->total_ps_buffered--; 236 local->total_ps_buffered--;
@@ -216,17 +240,9 @@ void ieee80211_if_reinit(struct net_device *dev)
216 break; 240 break;
217 } 241 }
218 case IEEE80211_IF_TYPE_WDS: 242 case IEEE80211_IF_TYPE_WDS:
219 sta = sta_info_get(local, sdata->u.wds.remote_addr); 243 /* nothing to do */
220 if (sta) {
221 sta_info_free(sta);
222 sta_info_put(sta);
223 } else {
224#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
225 printk(KERN_DEBUG "%s: Someone had deleted my STA "
226 "entry for the WDS link\n", dev->name);
227#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
228 }
229 break; 244 break;
245 case IEEE80211_IF_TYPE_MESH_POINT:
230 case IEEE80211_IF_TYPE_STA: 246 case IEEE80211_IF_TYPE_STA:
231 case IEEE80211_IF_TYPE_IBSS: 247 case IEEE80211_IF_TYPE_IBSS:
232 kfree(sdata->u.sta.extra_ie); 248 kfree(sdata->u.sta.extra_ie);
@@ -249,8 +265,8 @@ void ieee80211_if_reinit(struct net_device *dev)
249 break; 265 break;
250 } 266 }
251 267
252 /* remove all STAs that are bound to this virtual interface */ 268 flushed = sta_info_flush(local, sdata);
253 sta_info_flush(local, dev); 269 WARN_ON(flushed);
254 270
255 memset(&sdata->u, 0, sizeof(sdata->u)); 271 memset(&sdata->u, 0, sizeof(sdata->u));
256 ieee80211_if_sdata_init(sdata); 272 ieee80211_if_sdata_init(sdata);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index ed57fb8e82fc..150d66dbda9d 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -2,7 +2,7 @@
2 * Copyright 2002-2005, Instant802 Networks, Inc. 2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc. 3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> 5 * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -13,14 +13,15 @@
13#include <linux/etherdevice.h> 13#include <linux/etherdevice.h>
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/rcupdate.h> 15#include <linux/rcupdate.h>
16#include <linux/rtnetlink.h>
16#include <net/mac80211.h> 17#include <net/mac80211.h>
17#include "ieee80211_i.h" 18#include "ieee80211_i.h"
18#include "debugfs_key.h" 19#include "debugfs_key.h"
19#include "aes_ccm.h" 20#include "aes_ccm.h"
20 21
21 22
22/* 23/**
23 * Key handling basics 24 * DOC: Key handling basics
24 * 25 *
25 * Key handling in mac80211 is done based on per-interface (sub_if_data) 26 * Key handling in mac80211 is done based on per-interface (sub_if_data)
26 * keys and per-station keys. Since each station belongs to an interface, 27 * keys and per-station keys. Since each station belongs to an interface,
@@ -32,13 +33,81 @@
32 * There is currently no way of knowing this except by looking into 33 * There is currently no way of knowing this except by looking into
33 * debugfs. 34 * debugfs.
34 * 35 *
35 * All operations here are called under RTNL so no extra locking is 36 * All key operations are protected internally so you can call them at
36 * required. 37 * any time.
38 *
39 * Within mac80211, key references are, just as STA structure references,
40 * protected by RCU. Note, however, that some things are unprotected,
41 * namely the key->sta dereferences within the hardware acceleration
42 * functions. This means that sta_info_destroy() must flush the key todo
43 * list.
44 *
45 * All the direct key list manipulation functions must not sleep because
46 * they can operate on STA info structs that are protected by RCU.
37 */ 47 */
38 48
39static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 49static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
40static const u8 zero_addr[ETH_ALEN]; 50static const u8 zero_addr[ETH_ALEN];
41 51
52/* key mutex: used to synchronise todo runners */
53static DEFINE_MUTEX(key_mutex);
54static DEFINE_SPINLOCK(todo_lock);
55static LIST_HEAD(todo_list);
56
57static void key_todo(struct work_struct *work)
58{
59 ieee80211_key_todo();
60}
61
62static DECLARE_WORK(todo_work, key_todo);
63
64/**
65 * add_todo - add todo item for a key
66 *
67 * @key: key to add to do item for
68 * @flag: todo flag(s)
69 */
70static void add_todo(struct ieee80211_key *key, u32 flag)
71{
72 if (!key)
73 return;
74
75 spin_lock(&todo_lock);
76 key->flags |= flag;
77 /*
78 * Remove again if already on the list so that we move it to the end.
79 */
80 if (!list_empty(&key->todo))
81 list_del(&key->todo);
82 list_add_tail(&key->todo, &todo_list);
83 schedule_work(&todo_work);
84 spin_unlock(&todo_lock);
85}
86
87/**
88 * ieee80211_key_lock - lock the mac80211 key operation lock
89 *
90 * This locks the (global) mac80211 key operation lock, all
91 * key operations must be done under this lock.
92 */
93static void ieee80211_key_lock(void)
94{
95 mutex_lock(&key_mutex);
96}
97
98/**
99 * ieee80211_key_unlock - unlock the mac80211 key operation lock
100 */
101static void ieee80211_key_unlock(void)
102{
103 mutex_unlock(&key_mutex);
104}
105
106static void assert_key_lock(void)
107{
108 WARN_ON(!mutex_is_locked(&key_mutex));
109}
110
42static const u8 *get_mac_for_key(struct ieee80211_key *key) 111static const u8 *get_mac_for_key(struct ieee80211_key *key)
43{ 112{
44 const u8 *addr = bcast_addr; 113 const u8 *addr = bcast_addr;
@@ -65,6 +134,9 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
65 int ret; 134 int ret;
66 DECLARE_MAC_BUF(mac); 135 DECLARE_MAC_BUF(mac);
67 136
137 assert_key_lock();
138 might_sleep();
139
68 if (!key->local->ops->set_key) 140 if (!key->local->ops->set_key)
69 return; 141 return;
70 142
@@ -74,8 +146,11 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
74 key->sdata->dev->dev_addr, addr, 146 key->sdata->dev->dev_addr, addr,
75 &key->conf); 147 &key->conf);
76 148
77 if (!ret) 149 if (!ret) {
150 spin_lock(&todo_lock);
78 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; 151 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
152 spin_unlock(&todo_lock);
153 }
79 154
80 if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP) 155 if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP)
81 printk(KERN_ERR "mac80211-%s: failed to set key " 156 printk(KERN_ERR "mac80211-%s: failed to set key "
@@ -90,11 +165,18 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
90 int ret; 165 int ret;
91 DECLARE_MAC_BUF(mac); 166 DECLARE_MAC_BUF(mac);
92 167
93 if (!key->local->ops->set_key) 168 assert_key_lock();
169 might_sleep();
170
171 if (!key || !key->local->ops->set_key)
94 return; 172 return;
95 173
96 if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) 174 spin_lock(&todo_lock);
175 if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) {
176 spin_unlock(&todo_lock);
97 return; 177 return;
178 }
179 spin_unlock(&todo_lock);
98 180
99 addr = get_mac_for_key(key); 181 addr = get_mac_for_key(key);
100 182
@@ -108,12 +190,75 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
108 wiphy_name(key->local->hw.wiphy), 190 wiphy_name(key->local->hw.wiphy),
109 key->conf.keyidx, print_mac(mac, addr), ret); 191 key->conf.keyidx, print_mac(mac, addr), ret);
110 192
193 spin_lock(&todo_lock);
111 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; 194 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
195 spin_unlock(&todo_lock);
112} 196}
113 197
114struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata, 198static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
115 struct sta_info *sta, 199 int idx)
116 enum ieee80211_key_alg alg, 200{
201 struct ieee80211_key *key = NULL;
202
203 if (idx >= 0 && idx < NUM_DEFAULT_KEYS)
204 key = sdata->keys[idx];
205
206 rcu_assign_pointer(sdata->default_key, key);
207
208 if (key)
209 add_todo(key, KEY_FLAG_TODO_DEFKEY);
210}
211
212void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx)
213{
214 unsigned long flags;
215
216 spin_lock_irqsave(&sdata->local->key_lock, flags);
217 __ieee80211_set_default_key(sdata, idx);
218 spin_unlock_irqrestore(&sdata->local->key_lock, flags);
219}
220
221
222static void __ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
223 struct sta_info *sta,
224 struct ieee80211_key *old,
225 struct ieee80211_key *new)
226{
227 int idx, defkey;
228
229 if (new)
230 list_add(&new->list, &sdata->key_list);
231
232 if (sta) {
233 rcu_assign_pointer(sta->key, new);
234 } else {
235 WARN_ON(new && old && new->conf.keyidx != old->conf.keyidx);
236
237 if (old)
238 idx = old->conf.keyidx;
239 else
240 idx = new->conf.keyidx;
241
242 defkey = old && sdata->default_key == old;
243
244 if (defkey && !new)
245 __ieee80211_set_default_key(sdata, -1);
246
247 rcu_assign_pointer(sdata->keys[idx], new);
248 if (defkey && new)
249 __ieee80211_set_default_key(sdata, new->conf.keyidx);
250 }
251
252 if (old) {
253 /*
254 * We'll use an empty list to indicate that the key
255 * has already been removed.
256 */
257 list_del_init(&old->list);
258 }
259}
260
261struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
117 int idx, 262 int idx,
118 size_t key_len, 263 size_t key_len,
119 const u8 *key_data) 264 const u8 *key_data)
@@ -137,10 +282,8 @@ struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata,
137 key->conf.keyidx = idx; 282 key->conf.keyidx = idx;
138 key->conf.keylen = key_len; 283 key->conf.keylen = key_len;
139 memcpy(key->conf.key, key_data, key_len); 284 memcpy(key->conf.key, key_data, key_len);
140 285 INIT_LIST_HEAD(&key->list);
141 key->local = sdata->local; 286 INIT_LIST_HEAD(&key->todo);
142 key->sdata = sdata;
143 key->sta = sta;
144 287
145 if (alg == ALG_CCMP) { 288 if (alg == ALG_CCMP) {
146 /* 289 /*
@@ -149,22 +292,31 @@ struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata,
149 */ 292 */
150 key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(key_data); 293 key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(key_data);
151 if (!key->u.ccmp.tfm) { 294 if (!key->u.ccmp.tfm) {
152 ieee80211_key_free(key); 295 kfree(key);
153 return NULL; 296 return NULL;
154 } 297 }
155 } 298 }
156 299
157 ieee80211_debugfs_key_add(key->local, key); 300 return key;
301}
158 302
159 /* remove key first */ 303void ieee80211_key_link(struct ieee80211_key *key,
160 if (sta) 304 struct ieee80211_sub_if_data *sdata,
161 ieee80211_key_free(sta->key); 305 struct sta_info *sta)
162 else 306{
163 ieee80211_key_free(sdata->keys[idx]); 307 struct ieee80211_key *old_key;
308 unsigned long flags;
309 int idx;
164 310
165 if (sta) { 311 BUG_ON(!sdata);
166 ieee80211_debugfs_key_sta_link(key, sta); 312 BUG_ON(!key);
167 313
314 idx = key->conf.keyidx;
315 key->local = sdata->local;
316 key->sdata = sdata;
317 key->sta = sta;
318
319 if (sta) {
168 /* 320 /*
169 * some hardware cannot handle TKIP with QoS, so 321 * some hardware cannot handle TKIP with QoS, so
170 * we indicate whether QoS could be in use. 322 * we indicate whether QoS could be in use.
@@ -175,105 +327,194 @@ struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata,
175 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { 327 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) {
176 struct sta_info *ap; 328 struct sta_info *ap;
177 329
330 /*
331 * We're getting a sta pointer in,
332 * so must be under RCU read lock.
333 */
334
178 /* same here, the AP could be using QoS */ 335 /* same here, the AP could be using QoS */
179 ap = sta_info_get(key->local, key->sdata->u.sta.bssid); 336 ap = sta_info_get(key->local, key->sdata->u.sta.bssid);
180 if (ap) { 337 if (ap) {
181 if (ap->flags & WLAN_STA_WME) 338 if (ap->flags & WLAN_STA_WME)
182 key->conf.flags |= 339 key->conf.flags |=
183 IEEE80211_KEY_FLAG_WMM_STA; 340 IEEE80211_KEY_FLAG_WMM_STA;
184 sta_info_put(ap);
185 } 341 }
186 } 342 }
187 } 343 }
188 344
189 /* enable hwaccel if appropriate */ 345 spin_lock_irqsave(&sdata->local->key_lock, flags);
190 if (netif_running(key->sdata->dev))
191 ieee80211_key_enable_hw_accel(key);
192 346
193 if (sta) 347 if (sta)
194 rcu_assign_pointer(sta->key, key); 348 old_key = sta->key;
195 else 349 else
196 rcu_assign_pointer(sdata->keys[idx], key); 350 old_key = sdata->keys[idx];
197 351
198 list_add(&key->list, &sdata->key_list); 352 __ieee80211_key_replace(sdata, sta, old_key, key);
199 353
200 return key; 354 spin_unlock_irqrestore(&sdata->local->key_lock, flags);
355
356 /* free old key later */
357 add_todo(old_key, KEY_FLAG_TODO_DELETE);
358
359 add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS);
360 if (netif_running(sdata->dev))
361 add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD);
362}
363
364static void __ieee80211_key_free(struct ieee80211_key *key)
365{
366 /*
367 * Replace key with nothingness if it was ever used.
368 */
369 if (key->sdata)
370 __ieee80211_key_replace(key->sdata, key->sta,
371 key, NULL);
372
373 add_todo(key, KEY_FLAG_TODO_DELETE);
201} 374}
202 375
203void ieee80211_key_free(struct ieee80211_key *key) 376void ieee80211_key_free(struct ieee80211_key *key)
204{ 377{
378 unsigned long flags;
379
205 if (!key) 380 if (!key)
206 return; 381 return;
207 382
208 if (key->sta) { 383 spin_lock_irqsave(&key->sdata->local->key_lock, flags);
209 rcu_assign_pointer(key->sta->key, NULL); 384 __ieee80211_key_free(key);
210 } else { 385 spin_unlock_irqrestore(&key->sdata->local->key_lock, flags);
211 if (key->sdata->default_key == key) 386}
212 ieee80211_set_default_key(key->sdata, -1);
213 if (key->conf.keyidx >= 0 &&
214 key->conf.keyidx < NUM_DEFAULT_KEYS)
215 rcu_assign_pointer(key->sdata->keys[key->conf.keyidx],
216 NULL);
217 else
218 WARN_ON(1);
219 }
220 387
221 /* wait for all key users to complete */ 388/*
222 synchronize_rcu(); 389 * To be safe against concurrent manipulations of the list (which shouldn't
390 * actually happen) we need to hold the spinlock. But under the spinlock we
391 * can't actually do much, so we defer processing to the todo list. Then run
392 * the todo list to be sure the operation and possibly previously pending
393 * operations are completed.
394 */
395static void ieee80211_todo_for_each_key(struct ieee80211_sub_if_data *sdata,
396 u32 todo_flags)
397{
398 struct ieee80211_key *key;
399 unsigned long flags;
400
401 might_sleep();
402
403 spin_lock_irqsave(&sdata->local->key_lock, flags);
404 list_for_each_entry(key, &sdata->key_list, list)
405 add_todo(key, todo_flags);
406 spin_unlock_irqrestore(&sdata->local->key_lock, flags);
407
408 ieee80211_key_todo();
409}
410
411void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
412{
413 ASSERT_RTNL();
414
415 if (WARN_ON(!netif_running(sdata->dev)))
416 return;
417
418 ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_ADD);
419}
420
421void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata)
422{
423 ASSERT_RTNL();
424
425 ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_REMOVE);
426}
427
428static void __ieee80211_key_destroy(struct ieee80211_key *key)
429{
430 if (!key)
431 return;
223 432
224 /* remove from hwaccel if appropriate */
225 ieee80211_key_disable_hw_accel(key); 433 ieee80211_key_disable_hw_accel(key);
226 434
227 if (key->conf.alg == ALG_CCMP) 435 if (key->conf.alg == ALG_CCMP)
228 ieee80211_aes_key_free(key->u.ccmp.tfm); 436 ieee80211_aes_key_free(key->u.ccmp.tfm);
229 ieee80211_debugfs_key_remove(key); 437 ieee80211_debugfs_key_remove(key);
230 438
231 list_del(&key->list);
232
233 kfree(key); 439 kfree(key);
234} 440}
235 441
236void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx) 442static void __ieee80211_key_todo(void)
237{ 443{
238 struct ieee80211_key *key = NULL; 444 struct ieee80211_key *key;
445 bool work_done;
446 u32 todoflags;
239 447
240 if (idx >= 0 && idx < NUM_DEFAULT_KEYS) 448 /*
241 key = sdata->keys[idx]; 449 * NB: sta_info_destroy relies on this!
450 */
451 synchronize_rcu();
242 452
243 if (sdata->default_key != key) { 453 spin_lock(&todo_lock);
244 ieee80211_debugfs_key_remove_default(sdata); 454 while (!list_empty(&todo_list)) {
455 key = list_first_entry(&todo_list, struct ieee80211_key, todo);
456 list_del_init(&key->todo);
457 todoflags = key->flags & (KEY_FLAG_TODO_ADD_DEBUGFS |
458 KEY_FLAG_TODO_DEFKEY |
459 KEY_FLAG_TODO_HWACCEL_ADD |
460 KEY_FLAG_TODO_HWACCEL_REMOVE |
461 KEY_FLAG_TODO_DELETE);
462 key->flags &= ~todoflags;
463 spin_unlock(&todo_lock);
464
465 work_done = false;
466
467 if (todoflags & KEY_FLAG_TODO_ADD_DEBUGFS) {
468 ieee80211_debugfs_key_add(key);
469 work_done = true;
470 }
471 if (todoflags & KEY_FLAG_TODO_DEFKEY) {
472 ieee80211_debugfs_key_remove_default(key->sdata);
473 ieee80211_debugfs_key_add_default(key->sdata);
474 work_done = true;
475 }
476 if (todoflags & KEY_FLAG_TODO_HWACCEL_ADD) {
477 ieee80211_key_enable_hw_accel(key);
478 work_done = true;
479 }
480 if (todoflags & KEY_FLAG_TODO_HWACCEL_REMOVE) {
481 ieee80211_key_disable_hw_accel(key);
482 work_done = true;
483 }
484 if (todoflags & KEY_FLAG_TODO_DELETE) {
485 __ieee80211_key_destroy(key);
486 work_done = true;
487 }
245 488
246 rcu_assign_pointer(sdata->default_key, key); 489 WARN_ON(!work_done);
247 490
248 if (sdata->default_key) 491 spin_lock(&todo_lock);
249 ieee80211_debugfs_key_add_default(sdata);
250 } 492 }
493 spin_unlock(&todo_lock);
251} 494}
252 495
253void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata) 496void ieee80211_key_todo(void)
254{ 497{
255 struct ieee80211_key *key, *tmp; 498 ieee80211_key_lock();
256 499 __ieee80211_key_todo();
257 list_for_each_entry_safe(key, tmp, &sdata->key_list, list) 500 ieee80211_key_unlock();
258 ieee80211_key_free(key);
259} 501}
260 502
261void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) 503void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata)
262{ 504{
263 struct ieee80211_key *key; 505 struct ieee80211_key *key, *tmp;
506 unsigned long flags;
264 507
265 WARN_ON(!netif_running(sdata->dev)); 508 ieee80211_key_lock();
266 if (!netif_running(sdata->dev))
267 return;
268 509
269 list_for_each_entry(key, &sdata->key_list, list) 510 ieee80211_debugfs_key_remove_default(sdata);
270 ieee80211_key_enable_hw_accel(key);
271}
272 511
273void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata) 512 spin_lock_irqsave(&sdata->local->key_lock, flags);
274{ 513 list_for_each_entry_safe(key, tmp, &sdata->key_list, list)
275 struct ieee80211_key *key; 514 __ieee80211_key_free(key);
515 spin_unlock_irqrestore(&sdata->local->key_lock, flags);
276 516
277 list_for_each_entry(key, &sdata->key_list, list) 517 __ieee80211_key_todo();
278 ieee80211_key_disable_hw_accel(key); 518
519 ieee80211_key_unlock();
279} 520}
diff --git a/net/mac80211/ieee80211_key.h b/net/mac80211/key.h
index fc770e98d47b..f52c3df1fe9a 100644
--- a/net/mac80211/ieee80211_key.h
+++ b/net/mac80211/key.h
@@ -13,6 +13,7 @@
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <linux/rcupdate.h>
16#include <net/mac80211.h> 17#include <net/mac80211.h>
17 18
18/* ALG_TKIP 19/* ALG_TKIP
@@ -45,15 +46,40 @@ struct ieee80211_local;
45struct ieee80211_sub_if_data; 46struct ieee80211_sub_if_data;
46struct sta_info; 47struct sta_info;
47 48
48#define KEY_FLAG_UPLOADED_TO_HARDWARE (1<<0) 49/**
50 * enum ieee80211_internal_key_flags - internal key flags
51 *
52 * @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present
53 * in the hardware for TX crypto hardware acceleration.
54 * @KEY_FLAG_TODO_DELETE: Key is marked for deletion and will, after an
55 * RCU grace period, no longer be reachable other than from the
56 * todo list.
57 * @KEY_FLAG_TODO_HWACCEL_ADD: Key needs to be added to hardware acceleration.
58 * @KEY_FLAG_TODO_HWACCEL_REMOVE: Key needs to be removed from hardware
59 * acceleration.
60 * @KEY_FLAG_TODO_DEFKEY: Key is default key and debugfs needs to be updated.
61 * @KEY_FLAG_TODO_ADD_DEBUGFS: Key needs to be added to debugfs.
62 */
63enum ieee80211_internal_key_flags {
64 KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0),
65 KEY_FLAG_TODO_DELETE = BIT(1),
66 KEY_FLAG_TODO_HWACCEL_ADD = BIT(2),
67 KEY_FLAG_TODO_HWACCEL_REMOVE = BIT(3),
68 KEY_FLAG_TODO_DEFKEY = BIT(4),
69 KEY_FLAG_TODO_ADD_DEBUGFS = BIT(5),
70};
49 71
50struct ieee80211_key { 72struct ieee80211_key {
51 struct ieee80211_local *local; 73 struct ieee80211_local *local;
52 struct ieee80211_sub_if_data *sdata; 74 struct ieee80211_sub_if_data *sdata;
53 struct sta_info *sta; 75 struct sta_info *sta;
54 76
77 /* for sdata list */
55 struct list_head list; 78 struct list_head list;
79 /* for todo list */
80 struct list_head todo;
56 81
82 /* protected by todo lock! */
57 unsigned int flags; 83 unsigned int flags;
58 84
59 union { 85 union {
@@ -102,6 +128,7 @@ struct ieee80211_key {
102 struct dentry *replays; 128 struct dentry *replays;
103 struct dentry *key; 129 struct dentry *key;
104 struct dentry *ifindex; 130 struct dentry *ifindex;
131 int cnt;
105 } debugfs; 132 } debugfs;
106#endif 133#endif
107 134
@@ -112,16 +139,23 @@ struct ieee80211_key {
112 struct ieee80211_key_conf conf; 139 struct ieee80211_key_conf conf;
113}; 140};
114 141
115struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata, 142struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
116 struct sta_info *sta,
117 enum ieee80211_key_alg alg,
118 int idx, 143 int idx,
119 size_t key_len, 144 size_t key_len,
120 const u8 *key_data); 145 const u8 *key_data);
146/*
147 * Insert a key into data structures (sdata, sta if necessary)
148 * to make it used, free old key.
149 */
150void ieee80211_key_link(struct ieee80211_key *key,
151 struct ieee80211_sub_if_data *sdata,
152 struct sta_info *sta);
121void ieee80211_key_free(struct ieee80211_key *key); 153void ieee80211_key_free(struct ieee80211_key *key);
122void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx); 154void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx);
123void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata); 155void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata);
124void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); 156void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata);
125void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata); 157void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata);
126 158
159void ieee80211_key_todo(void);
160
127#endif /* IEEE80211_KEY_H */ 161#endif /* IEEE80211_KEY_H */
diff --git a/net/mac80211/ieee80211_led.c b/net/mac80211/led.c
index f401484ab6d7..162a643f16b6 100644
--- a/net/mac80211/ieee80211_led.c
+++ b/net/mac80211/led.c
@@ -8,7 +8,7 @@
8 8
9/* just for IFNAMSIZ */ 9/* just for IFNAMSIZ */
10#include <linux/if.h> 10#include <linux/if.h>
11#include "ieee80211_led.h" 11#include "led.h"
12 12
13void ieee80211_led_rx(struct ieee80211_local *local) 13void ieee80211_led_rx(struct ieee80211_local *local)
14{ 14{
diff --git a/net/mac80211/ieee80211_led.h b/net/mac80211/led.h
index 77b1e1ba6039..77b1e1ba6039 100644
--- a/net/mac80211/ieee80211_led.h
+++ b/net/mac80211/led.h
diff --git a/net/mac80211/ieee80211.c b/net/mac80211/main.c
index 8e586390a2ef..e9a978979d38 100644
--- a/net/mac80211/ieee80211.c
+++ b/net/mac80211/main.c
@@ -25,11 +25,12 @@
25#include <net/cfg80211.h> 25#include <net/cfg80211.h>
26 26
27#include "ieee80211_i.h" 27#include "ieee80211_i.h"
28#include "ieee80211_rate.h" 28#include "rate.h"
29#include "mesh.h"
29#include "wep.h" 30#include "wep.h"
30#include "wme.h" 31#include "wme.h"
31#include "aes_ccm.h" 32#include "aes_ccm.h"
32#include "ieee80211_led.h" 33#include "led.h"
33#include "cfg.h" 34#include "cfg.h"
34#include "debugfs.h" 35#include "debugfs.h"
35#include "debugfs_netdev.h" 36#include "debugfs_netdev.h"
@@ -67,9 +68,19 @@ static void ieee80211_configure_filter(struct ieee80211_local *local)
67 new_flags |= FIF_ALLMULTI; 68 new_flags |= FIF_ALLMULTI;
68 69
69 if (local->monitors) 70 if (local->monitors)
70 new_flags |= FIF_CONTROL | 71 new_flags |= FIF_BCN_PRBRESP_PROMISC;
71 FIF_OTHER_BSS | 72
72 FIF_BCN_PRBRESP_PROMISC; 73 if (local->fif_fcsfail)
74 new_flags |= FIF_FCSFAIL;
75
76 if (local->fif_plcpfail)
77 new_flags |= FIF_PLCPFAIL;
78
79 if (local->fif_control)
80 new_flags |= FIF_CONTROL;
81
82 if (local->fif_other_bss)
83 new_flags |= FIF_OTHER_BSS;
73 84
74 changed_flags = local->filter_flags ^ new_flags; 85 changed_flags = local->filter_flags ^ new_flags;
75 86
@@ -128,9 +139,15 @@ static void ieee80211_master_set_multicast_list(struct net_device *dev)
128 139
129static int ieee80211_change_mtu(struct net_device *dev, int new_mtu) 140static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
130{ 141{
142 int meshhdrlen;
143 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
144
145 meshhdrlen = (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) ? 5 : 0;
146
131 /* FIX: what would be proper limits for MTU? 147 /* FIX: what would be proper limits for MTU?
132 * This interface uses 802.3 frames. */ 148 * This interface uses 802.3 frames. */
133 if (new_mtu < 256 || new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6) { 149 if (new_mtu < 256 ||
150 new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
134 printk(KERN_WARNING "%s: invalid MTU %d\n", 151 printk(KERN_WARNING "%s: invalid MTU %d\n",
135 dev->name, new_mtu); 152 dev->name, new_mtu);
136 return -EINVAL; 153 return -EINVAL;
@@ -166,6 +183,7 @@ static int ieee80211_open(struct net_device *dev)
166 struct ieee80211_if_init_conf conf; 183 struct ieee80211_if_init_conf conf;
167 int res; 184 int res;
168 bool need_hw_reconfig = 0; 185 bool need_hw_reconfig = 0;
186 struct sta_info *sta;
169 187
170 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 188 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
171 189
@@ -173,8 +191,52 @@ static int ieee80211_open(struct net_device *dev)
173 list_for_each_entry(nsdata, &local->interfaces, list) { 191 list_for_each_entry(nsdata, &local->interfaces, list) {
174 struct net_device *ndev = nsdata->dev; 192 struct net_device *ndev = nsdata->dev;
175 193
176 if (ndev != dev && ndev != local->mdev && netif_running(ndev) && 194 if (ndev != dev && ndev != local->mdev && netif_running(ndev)) {
177 compare_ether_addr(dev->dev_addr, ndev->dev_addr) == 0) { 195 /*
196 * Allow only a single IBSS interface to be up at any
197 * time. This is restricted because beacon distribution
198 * cannot work properly if both are in the same IBSS.
199 *
200 * To remove this restriction we'd have to disallow them
201 * from setting the same SSID on different IBSS interfaces
202 * belonging to the same hardware. Then, however, we're
203 * faced with having to adopt two different TSF timers...
204 */
205 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
206 nsdata->vif.type == IEEE80211_IF_TYPE_IBSS)
207 return -EBUSY;
208
209 /*
210 * Disallow multiple IBSS/STA mode interfaces.
211 *
212 * This is a technical restriction, it is possible although
213 * most likely not IEEE 802.11 compliant to have multiple
214 * STAs with just a single hardware (the TSF timer will not
215 * be adjusted properly.)
216 *
217 * However, because mac80211 uses the master device's BSS
218 * information for each STA/IBSS interface, doing this will
219 * currently corrupt that BSS information completely, unless,
220 * a not very useful case, both STAs are associated to the
221 * same BSS.
222 *
223 * To remove this restriction, the BSS information needs to
224 * be embedded in the STA/IBSS mode sdata instead of using
225 * the master device's BSS structure.
226 */
227 if ((sdata->vif.type == IEEE80211_IF_TYPE_STA ||
228 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) &&
229 (nsdata->vif.type == IEEE80211_IF_TYPE_STA ||
230 nsdata->vif.type == IEEE80211_IF_TYPE_IBSS))
231 return -EBUSY;
232
233 /*
234 * The remaining checks are only performed for interfaces
235 * with the same MAC address.
236 */
237 if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
238 continue;
239
178 /* 240 /*
179 * check whether it may have the same address 241 * check whether it may have the same address
180 */ 242 */
@@ -186,8 +248,7 @@ static int ieee80211_open(struct net_device *dev)
186 * can only add VLANs to enabled APs 248 * can only add VLANs to enabled APs
187 */ 249 */
188 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN && 250 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN &&
189 nsdata->vif.type == IEEE80211_IF_TYPE_AP && 251 nsdata->vif.type == IEEE80211_IF_TYPE_AP)
190 netif_running(nsdata->dev))
191 sdata->u.vlan.ap = nsdata; 252 sdata->u.vlan.ap = nsdata;
192 } 253 }
193 } 254 }
@@ -196,6 +257,20 @@ static int ieee80211_open(struct net_device *dev)
196 case IEEE80211_IF_TYPE_WDS: 257 case IEEE80211_IF_TYPE_WDS:
197 if (is_zero_ether_addr(sdata->u.wds.remote_addr)) 258 if (is_zero_ether_addr(sdata->u.wds.remote_addr))
198 return -ENOLINK; 259 return -ENOLINK;
260
261 /* Create STA entry for the WDS peer */
262 sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
263 GFP_KERNEL);
264 if (!sta)
265 return -ENOMEM;
266
267 sta->flags |= WLAN_STA_AUTHORIZED;
268
269 res = sta_info_insert(sta);
270 if (res) {
271 /* STA has been freed */
272 return res;
273 }
199 break; 274 break;
200 case IEEE80211_IF_TYPE_VLAN: 275 case IEEE80211_IF_TYPE_VLAN:
201 if (!sdata->u.vlan.ap) 276 if (!sdata->u.vlan.ap)
@@ -205,6 +280,7 @@ static int ieee80211_open(struct net_device *dev)
205 case IEEE80211_IF_TYPE_STA: 280 case IEEE80211_IF_TYPE_STA:
206 case IEEE80211_IF_TYPE_MNTR: 281 case IEEE80211_IF_TYPE_MNTR:
207 case IEEE80211_IF_TYPE_IBSS: 282 case IEEE80211_IF_TYPE_IBSS:
283 case IEEE80211_IF_TYPE_MESH_POINT:
208 /* no special treatment */ 284 /* no special treatment */
209 break; 285 break;
210 case IEEE80211_IF_TYPE_INVALID: 286 case IEEE80211_IF_TYPE_INVALID:
@@ -229,15 +305,28 @@ static int ieee80211_open(struct net_device *dev)
229 /* no need to tell driver */ 305 /* no need to tell driver */
230 break; 306 break;
231 case IEEE80211_IF_TYPE_MNTR: 307 case IEEE80211_IF_TYPE_MNTR:
308 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
309 local->cooked_mntrs++;
310 break;
311 }
312
232 /* must be before the call to ieee80211_configure_filter */ 313 /* must be before the call to ieee80211_configure_filter */
233 local->monitors++; 314 local->monitors++;
234 if (local->monitors == 1) { 315 if (local->monitors == 1)
235 netif_tx_lock_bh(local->mdev);
236 ieee80211_configure_filter(local);
237 netif_tx_unlock_bh(local->mdev);
238
239 local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP; 316 local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP;
240 } 317
318 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
319 local->fif_fcsfail++;
320 if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
321 local->fif_plcpfail++;
322 if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
323 local->fif_control++;
324 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
325 local->fif_other_bss++;
326
327 netif_tx_lock_bh(local->mdev);
328 ieee80211_configure_filter(local);
329 netif_tx_unlock_bh(local->mdev);
241 break; 330 break;
242 case IEEE80211_IF_TYPE_STA: 331 case IEEE80211_IF_TYPE_STA:
243 case IEEE80211_IF_TYPE_IBSS: 332 case IEEE80211_IF_TYPE_IBSS:
@@ -305,24 +394,46 @@ static int ieee80211_open(struct net_device *dev)
305 394
306static int ieee80211_stop(struct net_device *dev) 395static int ieee80211_stop(struct net_device *dev)
307{ 396{
308 struct ieee80211_sub_if_data *sdata; 397 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
309 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 398 struct ieee80211_local *local = sdata->local;
310 struct ieee80211_if_init_conf conf; 399 struct ieee80211_if_init_conf conf;
311 struct sta_info *sta; 400 struct sta_info *sta;
312 int i;
313 401
314 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 402 /*
403 * Stop TX on this interface first.
404 */
405 netif_stop_queue(dev);
315 406
316 list_for_each_entry(sta, &local->sta_list, list) { 407 /*
317 if (sta->dev == dev) 408 * Now delete all active aggregation sessions.
318 for (i = 0; i < STA_TID_NUM; i++) 409 */
319 ieee80211_sta_stop_rx_ba_session(sta->dev, 410 rcu_read_lock();
320 sta->addr, i, 411
321 WLAN_BACK_RECIPIENT, 412 list_for_each_entry_rcu(sta, &local->sta_list, list) {
322 WLAN_REASON_QSTA_LEAVE_QBSS); 413 if (sta->sdata == sdata)
414 ieee80211_sta_tear_down_BA_sessions(dev, sta->addr);
323 } 415 }
324 416
325 netif_stop_queue(dev); 417 rcu_read_unlock();
418
419 /*
420 * Remove all stations associated with this interface.
421 *
422 * This must be done before calling ops->remove_interface()
423 * because otherwise we can later invoke ops->sta_notify()
424 * whenever the STAs are removed, and that invalidates driver
425 * assumptions about always getting a vif pointer that is valid
426 * (because if we remove a STA after ops->remove_interface()
427 * the driver will have removed the vif info already!)
428 *
429 * We could relax this and only unlink the stations from the
430 * hash table and list but keep them on a per-sdata list that
431 * will be inserted back again when the interface is brought
432 * up again, but I don't currently see a use case for that,
433 * except with WDS which gets a STA entry created when it is
434 * brought up.
435 */
436 sta_info_flush(local, sdata);
326 437
327 /* 438 /*
328 * Don't count this interface for promisc/allmulti while it 439 * Don't count this interface for promisc/allmulti while it
@@ -364,15 +475,29 @@ static int ieee80211_stop(struct net_device *dev)
364 /* no need to tell driver */ 475 /* no need to tell driver */
365 break; 476 break;
366 case IEEE80211_IF_TYPE_MNTR: 477 case IEEE80211_IF_TYPE_MNTR:
367 local->monitors--; 478 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
368 if (local->monitors == 0) { 479 local->cooked_mntrs--;
369 netif_tx_lock_bh(local->mdev); 480 break;
370 ieee80211_configure_filter(local); 481 }
371 netif_tx_unlock_bh(local->mdev);
372 482
483 local->monitors--;
484 if (local->monitors == 0)
373 local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP; 485 local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP;
374 } 486
487 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
488 local->fif_fcsfail--;
489 if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
490 local->fif_plcpfail--;
491 if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
492 local->fif_control--;
493 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
494 local->fif_other_bss--;
495
496 netif_tx_lock_bh(local->mdev);
497 ieee80211_configure_filter(local);
498 netif_tx_unlock_bh(local->mdev);
375 break; 499 break;
500 case IEEE80211_IF_TYPE_MESH_POINT:
376 case IEEE80211_IF_TYPE_STA: 501 case IEEE80211_IF_TYPE_STA:
377 case IEEE80211_IF_TYPE_IBSS: 502 case IEEE80211_IF_TYPE_IBSS:
378 sdata->u.sta.state = IEEE80211_DISABLED; 503 sdata->u.sta.state = IEEE80211_DISABLED;
@@ -426,6 +551,357 @@ static int ieee80211_stop(struct net_device *dev)
426 return 0; 551 return 0;
427} 552}
428 553
554int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
555{
556 struct ieee80211_local *local = hw_to_local(hw);
557 struct sta_info *sta;
558 struct ieee80211_sub_if_data *sdata;
559 u16 start_seq_num = 0;
560 u8 *state;
561 int ret;
562 DECLARE_MAC_BUF(mac);
563
564 if (tid >= STA_TID_NUM)
565 return -EINVAL;
566
567#ifdef CONFIG_MAC80211_HT_DEBUG
568 printk(KERN_DEBUG "Open BA session requested for %s tid %u\n",
569 print_mac(mac, ra), tid);
570#endif /* CONFIG_MAC80211_HT_DEBUG */
571
572 rcu_read_lock();
573
574 sta = sta_info_get(local, ra);
575 if (!sta) {
576 printk(KERN_DEBUG "Could not find the station\n");
577 rcu_read_unlock();
578 return -ENOENT;
579 }
580
581 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
582
583 /* we have tried too many times, receiver does not want A-MPDU */
584 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
585 ret = -EBUSY;
586 goto start_ba_exit;
587 }
588
589 state = &sta->ampdu_mlme.tid_state_tx[tid];
590 /* check if the TID is not in aggregation flow already */
591 if (*state != HT_AGG_STATE_IDLE) {
592#ifdef CONFIG_MAC80211_HT_DEBUG
593 printk(KERN_DEBUG "BA request denied - session is not "
594 "idle on tid %u\n", tid);
595#endif /* CONFIG_MAC80211_HT_DEBUG */
596 ret = -EAGAIN;
597 goto start_ba_exit;
598 }
599
600 /* prepare A-MPDU MLME for Tx aggregation */
601 sta->ampdu_mlme.tid_tx[tid] =
602 kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
603 if (!sta->ampdu_mlme.tid_tx[tid]) {
604 if (net_ratelimit())
605 printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
606 tid);
607 ret = -ENOMEM;
608 goto start_ba_exit;
609 }
610 /* Tx timer */
611 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
612 sta_addba_resp_timer_expired;
613 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
614 (unsigned long)&sta->timer_to_tid[tid];
615 init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
616
617 /* ensure that TX flow won't interrupt us
618 * until the end of the call to requeue function */
619 spin_lock_bh(&local->mdev->queue_lock);
620
621 /* create a new queue for this aggregation */
622 ret = ieee80211_ht_agg_queue_add(local, sta, tid);
623
624 /* case no queue is available to aggregation
625 * don't switch to aggregation */
626 if (ret) {
627#ifdef CONFIG_MAC80211_HT_DEBUG
628 printk(KERN_DEBUG "BA request denied - queue unavailable for"
629 " tid %d\n", tid);
630#endif /* CONFIG_MAC80211_HT_DEBUG */
631 goto start_ba_err;
632 }
633 sdata = sta->sdata;
634
635 /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the
636 * call back right away, it must see that the flow has begun */
637 *state |= HT_ADDBA_REQUESTED_MSK;
638
639 if (local->ops->ampdu_action)
640 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
641 ra, tid, &start_seq_num);
642
643 if (ret) {
644 /* No need to requeue the packets in the agg queue, since we
645 * held the tx lock: no packet could be enqueued to the newly
646 * allocated queue */
647 ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
648#ifdef CONFIG_MAC80211_HT_DEBUG
649 printk(KERN_DEBUG "BA request denied - HW unavailable for"
650 " tid %d\n", tid);
651#endif /* CONFIG_MAC80211_HT_DEBUG */
652 *state = HT_AGG_STATE_IDLE;
653 goto start_ba_err;
654 }
655
656 /* Will put all the packets in the new SW queue */
657 ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
658 spin_unlock_bh(&local->mdev->queue_lock);
659
660 /* send an addBA request */
661 sta->ampdu_mlme.dialog_token_allocator++;
662 sta->ampdu_mlme.tid_tx[tid]->dialog_token =
663 sta->ampdu_mlme.dialog_token_allocator;
664 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
665
666 ieee80211_send_addba_request(sta->sdata->dev, ra, tid,
667 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
668 sta->ampdu_mlme.tid_tx[tid]->ssn,
669 0x40, 5000);
670
671 /* activate the timer for the recipient's addBA response */
672 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
673 jiffies + ADDBA_RESP_INTERVAL;
674 add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
675 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
676 goto start_ba_exit;
677
678start_ba_err:
679 kfree(sta->ampdu_mlme.tid_tx[tid]);
680 sta->ampdu_mlme.tid_tx[tid] = NULL;
681 spin_unlock_bh(&local->mdev->queue_lock);
682 ret = -EBUSY;
683start_ba_exit:
684 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
685 rcu_read_unlock();
686 return ret;
687}
688EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
689
690int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
691 u8 *ra, u16 tid,
692 enum ieee80211_back_parties initiator)
693{
694 struct ieee80211_local *local = hw_to_local(hw);
695 struct sta_info *sta;
696 u8 *state;
697 int ret = 0;
698 DECLARE_MAC_BUF(mac);
699
700 if (tid >= STA_TID_NUM)
701 return -EINVAL;
702
703 rcu_read_lock();
704 sta = sta_info_get(local, ra);
705 if (!sta) {
706 rcu_read_unlock();
707 return -ENOENT;
708 }
709
710 /* check if the TID is in aggregation */
711 state = &sta->ampdu_mlme.tid_state_tx[tid];
712 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
713
714 if (*state != HT_AGG_STATE_OPERATIONAL) {
715 ret = -ENOENT;
716 goto stop_BA_exit;
717 }
718
719#ifdef CONFIG_MAC80211_HT_DEBUG
720 printk(KERN_DEBUG "Tx BA session stop requested for %s tid %u\n",
721 print_mac(mac, ra), tid);
722#endif /* CONFIG_MAC80211_HT_DEBUG */
723
724 ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]);
725
726 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
727 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
728
729 if (local->ops->ampdu_action)
730 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP,
731 ra, tid, NULL);
732
733 /* case HW denied going back to legacy */
734 if (ret) {
735 WARN_ON(ret != -EBUSY);
736 *state = HT_AGG_STATE_OPERATIONAL;
737 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
738 goto stop_BA_exit;
739 }
740
741stop_BA_exit:
742 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
743 rcu_read_unlock();
744 return ret;
745}
746EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
747
748void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
749{
750 struct ieee80211_local *local = hw_to_local(hw);
751 struct sta_info *sta;
752 u8 *state;
753 DECLARE_MAC_BUF(mac);
754
755 if (tid >= STA_TID_NUM) {
756 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
757 tid, STA_TID_NUM);
758 return;
759 }
760
761 rcu_read_lock();
762 sta = sta_info_get(local, ra);
763 if (!sta) {
764 rcu_read_unlock();
765 printk(KERN_DEBUG "Could not find station: %s\n",
766 print_mac(mac, ra));
767 return;
768 }
769
770 state = &sta->ampdu_mlme.tid_state_tx[tid];
771 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
772
773 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
774 printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
775 *state);
776 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
777 rcu_read_unlock();
778 return;
779 }
780
781 WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);
782
783 *state |= HT_ADDBA_DRV_READY_MSK;
784
785 if (*state == HT_AGG_STATE_OPERATIONAL) {
786 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
787 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
788 }
789 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
790 rcu_read_unlock();
791}
792EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
793
794void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
795{
796 struct ieee80211_local *local = hw_to_local(hw);
797 struct sta_info *sta;
798 u8 *state;
799 int agg_queue;
800 DECLARE_MAC_BUF(mac);
801
802 if (tid >= STA_TID_NUM) {
803 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
804 tid, STA_TID_NUM);
805 return;
806 }
807
808#ifdef CONFIG_MAC80211_HT_DEBUG
809 printk(KERN_DEBUG "Stopping Tx BA session for %s tid %d\n",
810 print_mac(mac, ra), tid);
811#endif /* CONFIG_MAC80211_HT_DEBUG */
812
813 rcu_read_lock();
814 sta = sta_info_get(local, ra);
815 if (!sta) {
816 printk(KERN_DEBUG "Could not find station: %s\n",
817 print_mac(mac, ra));
818 rcu_read_unlock();
819 return;
820 }
821 state = &sta->ampdu_mlme.tid_state_tx[tid];
822
823 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
824 if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
825 printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
826 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
827 rcu_read_unlock();
828 return;
829 }
830
831 if (*state & HT_AGG_STATE_INITIATOR_MSK)
832 ieee80211_send_delba(sta->sdata->dev, ra, tid,
833 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
834
835 agg_queue = sta->tid_to_tx_q[tid];
836
837 /* avoid ordering issues: we are the only one that can modify
838 * the content of the qdiscs */
839 spin_lock_bh(&local->mdev->queue_lock);
840 /* remove the queue for this aggregation */
841 ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
842 spin_unlock_bh(&local->mdev->queue_lock);
843
844 /* we just requeued the all the frames that were in the removed
845 * queue, and since we might miss a softirq we do netif_schedule.
846 * ieee80211_wake_queue is not used here as this queue is not
847 * necessarily stopped */
848 netif_schedule(local->mdev);
849 *state = HT_AGG_STATE_IDLE;
850 sta->ampdu_mlme.addba_req_num[tid] = 0;
851 kfree(sta->ampdu_mlme.tid_tx[tid]);
852 sta->ampdu_mlme.tid_tx[tid] = NULL;
853 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
854
855 rcu_read_unlock();
856}
857EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
858
859void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
860 const u8 *ra, u16 tid)
861{
862 struct ieee80211_local *local = hw_to_local(hw);
863 struct ieee80211_ra_tid *ra_tid;
864 struct sk_buff *skb = dev_alloc_skb(0);
865
866 if (unlikely(!skb)) {
867 if (net_ratelimit())
868 printk(KERN_WARNING "%s: Not enough memory, "
869 "dropping start BA session", skb->dev->name);
870 return;
871 }
872 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
873 memcpy(&ra_tid->ra, ra, ETH_ALEN);
874 ra_tid->tid = tid;
875
876 skb->pkt_type = IEEE80211_ADDBA_MSG;
877 skb_queue_tail(&local->skb_queue, skb);
878 tasklet_schedule(&local->tasklet);
879}
880EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
881
882void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
883 const u8 *ra, u16 tid)
884{
885 struct ieee80211_local *local = hw_to_local(hw);
886 struct ieee80211_ra_tid *ra_tid;
887 struct sk_buff *skb = dev_alloc_skb(0);
888
889 if (unlikely(!skb)) {
890 if (net_ratelimit())
891 printk(KERN_WARNING "%s: Not enough memory, "
892 "dropping stop BA session", skb->dev->name);
893 return;
894 }
895 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
896 memcpy(&ra_tid->ra, ra, ETH_ALEN);
897 ra_tid->tid = tid;
898
899 skb->pkt_type = IEEE80211_DELBA_MSG;
900 skb_queue_tail(&local->skb_queue, skb);
901 tasklet_schedule(&local->tasklet);
902}
903EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
904
429static void ieee80211_set_multicast_list(struct net_device *dev) 905static void ieee80211_set_multicast_list(struct net_device *dev)
430{ 906{
431 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 907 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
@@ -477,41 +953,6 @@ void ieee80211_if_setup(struct net_device *dev)
477 dev->destructor = ieee80211_if_free; 953 dev->destructor = ieee80211_if_free;
478} 954}
479 955
480/* WDS specialties */
481
482int ieee80211_if_update_wds(struct net_device *dev, u8 *remote_addr)
483{
484 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
485 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
486 struct sta_info *sta;
487 DECLARE_MAC_BUF(mac);
488
489 if (compare_ether_addr(remote_addr, sdata->u.wds.remote_addr) == 0)
490 return 0;
491
492 /* Create STA entry for the new peer */
493 sta = sta_info_add(local, dev, remote_addr, GFP_KERNEL);
494 if (!sta)
495 return -ENOMEM;
496 sta_info_put(sta);
497
498 /* Remove STA entry for the old peer */
499 sta = sta_info_get(local, sdata->u.wds.remote_addr);
500 if (sta) {
501 sta_info_free(sta);
502 sta_info_put(sta);
503 } else {
504 printk(KERN_DEBUG "%s: could not find STA entry for WDS link "
505 "peer %s\n",
506 dev->name, print_mac(mac, sdata->u.wds.remote_addr));
507 }
508
509 /* Update WDS link data */
510 memcpy(&sdata->u.wds.remote_addr, remote_addr, ETH_ALEN);
511
512 return 0;
513}
514
515/* everything else */ 956/* everything else */
516 957
517static int __ieee80211_if_config(struct net_device *dev, 958static int __ieee80211_if_config(struct net_device *dev,
@@ -532,6 +973,9 @@ static int __ieee80211_if_config(struct net_device *dev,
532 conf.bssid = sdata->u.sta.bssid; 973 conf.bssid = sdata->u.sta.bssid;
533 conf.ssid = sdata->u.sta.ssid; 974 conf.ssid = sdata->u.sta.ssid;
534 conf.ssid_len = sdata->u.sta.ssid_len; 975 conf.ssid_len = sdata->u.sta.ssid_len;
976 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
977 conf.beacon = beacon;
978 ieee80211_start_mesh(dev);
535 } else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { 979 } else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
536 conf.ssid = sdata->u.ap.ssid; 980 conf.ssid = sdata->u.ap.ssid;
537 conf.ssid_len = sdata->u.ap.ssid_len; 981 conf.ssid_len = sdata->u.ap.ssid_len;
@@ -544,6 +988,11 @@ static int __ieee80211_if_config(struct net_device *dev,
544 988
545int ieee80211_if_config(struct net_device *dev) 989int ieee80211_if_config(struct net_device *dev)
546{ 990{
991 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
992 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
993 if (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT &&
994 (local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE))
995 return ieee80211_if_config_beacon(dev);
547 return __ieee80211_if_config(dev, NULL, NULL); 996 return __ieee80211_if_config(dev, NULL, NULL);
548} 997}
549 998
@@ -565,37 +1014,28 @@ int ieee80211_if_config_beacon(struct net_device *dev)
565 1014
566int ieee80211_hw_config(struct ieee80211_local *local) 1015int ieee80211_hw_config(struct ieee80211_local *local)
567{ 1016{
568 struct ieee80211_hw_mode *mode;
569 struct ieee80211_channel *chan; 1017 struct ieee80211_channel *chan;
570 int ret = 0; 1018 int ret = 0;
571 1019
572 if (local->sta_sw_scanning) { 1020 if (local->sta_sw_scanning)
573 chan = local->scan_channel; 1021 chan = local->scan_channel;
574 mode = local->scan_hw_mode; 1022 else
575 } else {
576 chan = local->oper_channel; 1023 chan = local->oper_channel;
577 mode = local->oper_hw_mode;
578 }
579 1024
580 local->hw.conf.channel = chan->chan; 1025 local->hw.conf.channel = chan;
581 local->hw.conf.channel_val = chan->val; 1026
582 if (!local->hw.conf.power_level) { 1027 if (!local->hw.conf.power_level)
583 local->hw.conf.power_level = chan->power_level; 1028 local->hw.conf.power_level = chan->max_power;
584 } else { 1029 else
585 local->hw.conf.power_level = min(chan->power_level, 1030 local->hw.conf.power_level = min(chan->max_power,
586 local->hw.conf.power_level); 1031 local->hw.conf.power_level);
587 } 1032
588 local->hw.conf.freq = chan->freq; 1033 local->hw.conf.max_antenna_gain = chan->max_antenna_gain;
589 local->hw.conf.phymode = mode->mode;
590 local->hw.conf.antenna_max = chan->antenna_max;
591 local->hw.conf.chan = chan;
592 local->hw.conf.mode = mode;
593 1034
594#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1035#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
595 printk(KERN_DEBUG "HW CONFIG: channel=%d freq=%d " 1036 printk(KERN_DEBUG "%s: HW CONFIG: freq=%d\n",
596 "phymode=%d\n", local->hw.conf.channel, local->hw.conf.freq, 1037 wiphy_name(local->hw.wiphy), chan->center_freq);
597 local->hw.conf.phymode); 1038#endif
598#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
599 1039
600 if (local->open_count) 1040 if (local->open_count)
601 ret = local->ops->config(local_to_hw(local), &local->hw.conf); 1041 ret = local->ops->config(local_to_hw(local), &local->hw.conf);
@@ -604,52 +1044,69 @@ int ieee80211_hw_config(struct ieee80211_local *local)
604} 1044}
605 1045
606/** 1046/**
607 * ieee80211_hw_config_ht should be used only after legacy configuration 1047 * ieee80211_handle_ht should be used only after legacy configuration
608 * has been determined, as ht configuration depends upon the hardware's 1048 * has been determined namely band, as ht configuration depends upon
609 * HT abilities for a _specific_ band. 1049 * the hardware's HT abilities for a _specific_ band.
610 */ 1050 */
611int ieee80211_hw_config_ht(struct ieee80211_local *local, int enable_ht, 1051u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
612 struct ieee80211_ht_info *req_ht_cap, 1052 struct ieee80211_ht_info *req_ht_cap,
613 struct ieee80211_ht_bss_info *req_bss_cap) 1053 struct ieee80211_ht_bss_info *req_bss_cap)
614{ 1054{
615 struct ieee80211_conf *conf = &local->hw.conf; 1055 struct ieee80211_conf *conf = &local->hw.conf;
616 struct ieee80211_hw_mode *mode = conf->mode; 1056 struct ieee80211_supported_band *sband;
1057 struct ieee80211_ht_info ht_conf;
1058 struct ieee80211_ht_bss_info ht_bss_conf;
617 int i; 1059 int i;
1060 u32 changed = 0;
1061
1062 sband = local->hw.wiphy->bands[conf->channel->band];
618 1063
619 /* HT is not supported */ 1064 /* HT is not supported */
620 if (!mode->ht_info.ht_supported) { 1065 if (!sband->ht_info.ht_supported) {
621 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE; 1066 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
622 return -EOPNOTSUPP; 1067 return 0;
623 } 1068 }
624 1069
625 /* disable HT */ 1070 memset(&ht_conf, 0, sizeof(struct ieee80211_ht_info));
626 if (!enable_ht) { 1071 memset(&ht_bss_conf, 0, sizeof(struct ieee80211_ht_bss_info));
627 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE; 1072
628 } else { 1073 if (enable_ht) {
1074 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE))
1075 changed |= BSS_CHANGED_HT;
1076
629 conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE; 1077 conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE;
630 conf->ht_conf.cap = req_ht_cap->cap & mode->ht_info.cap; 1078 ht_conf.ht_supported = 1;
631 conf->ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS); 1079
632 conf->ht_conf.cap |= 1080 ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap;
633 mode->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS; 1081 ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS);
634 conf->ht_bss_conf.primary_channel = 1082 ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS;
635 req_bss_cap->primary_channel; 1083
636 conf->ht_bss_conf.bss_cap = req_bss_cap->bss_cap;
637 conf->ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode;
638 for (i = 0; i < SUPP_MCS_SET_LEN; i++) 1084 for (i = 0; i < SUPP_MCS_SET_LEN; i++)
639 conf->ht_conf.supp_mcs_set[i] = 1085 ht_conf.supp_mcs_set[i] =
640 mode->ht_info.supp_mcs_set[i] & 1086 sband->ht_info.supp_mcs_set[i] &
641 req_ht_cap->supp_mcs_set[i]; 1087 req_ht_cap->supp_mcs_set[i];
642 1088
643 /* In STA mode, this gives us indication 1089 ht_bss_conf.primary_channel = req_bss_cap->primary_channel;
644 * to the AP's mode of operation */ 1090 ht_bss_conf.bss_cap = req_bss_cap->bss_cap;
645 conf->ht_conf.ht_supported = 1; 1091 ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode;
646 conf->ht_conf.ampdu_factor = req_ht_cap->ampdu_factor; 1092
647 conf->ht_conf.ampdu_density = req_ht_cap->ampdu_density; 1093 ht_conf.ampdu_factor = req_ht_cap->ampdu_factor;
1094 ht_conf.ampdu_density = req_ht_cap->ampdu_density;
1095
1096 /* if bss configuration changed store the new one */
1097 if (memcmp(&conf->ht_conf, &ht_conf, sizeof(ht_conf)) ||
1098 memcmp(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf))) {
1099 changed |= BSS_CHANGED_HT;
1100 memcpy(&conf->ht_conf, &ht_conf, sizeof(ht_conf));
1101 memcpy(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf));
1102 }
1103 } else {
1104 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)
1105 changed |= BSS_CHANGED_HT;
1106 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
648 } 1107 }
649 1108
650 local->ops->conf_ht(local_to_hw(local), &local->hw.conf); 1109 return changed;
651
652 return 0;
653} 1110}
654 1111
655void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, 1112void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
@@ -725,6 +1182,7 @@ static void ieee80211_tasklet_handler(unsigned long data)
725 struct sk_buff *skb; 1182 struct sk_buff *skb;
726 struct ieee80211_rx_status rx_status; 1183 struct ieee80211_rx_status rx_status;
727 struct ieee80211_tx_status *tx_status; 1184 struct ieee80211_tx_status *tx_status;
1185 struct ieee80211_ra_tid *ra_tid;
728 1186
729 while ((skb = skb_dequeue(&local->skb_queue)) || 1187 while ((skb = skb_dequeue(&local->skb_queue)) ||
730 (skb = skb_dequeue(&local->skb_queue_unreliable))) { 1188 (skb = skb_dequeue(&local->skb_queue_unreliable))) {
@@ -745,6 +1203,18 @@ static void ieee80211_tasklet_handler(unsigned long data)
745 skb, tx_status); 1203 skb, tx_status);
746 kfree(tx_status); 1204 kfree(tx_status);
747 break; 1205 break;
1206 case IEEE80211_DELBA_MSG:
1207 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
1208 ieee80211_stop_tx_ba_cb(local_to_hw(local),
1209 ra_tid->ra, ra_tid->tid);
1210 dev_kfree_skb(skb);
1211 break;
1212 case IEEE80211_ADDBA_MSG:
1213 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
1214 ieee80211_start_tx_ba_cb(local_to_hw(local),
1215 ra_tid->ra, ra_tid->tid);
1216 dev_kfree_skb(skb);
1217 break ;
748 default: /* should never get here! */ 1218 default: /* should never get here! */
749 printk(KERN_ERR "%s: Unknown message type (%d)\n", 1219 printk(KERN_ERR "%s: Unknown message type (%d)\n",
750 wiphy_name(local->hw.wiphy), skb->pkt_type); 1220 wiphy_name(local->hw.wiphy), skb->pkt_type);
@@ -822,6 +1292,77 @@ no_key:
822 } 1292 }
823} 1293}
824 1294
1295static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
1296 struct sta_info *sta,
1297 struct sk_buff *skb,
1298 struct ieee80211_tx_status *status)
1299{
1300 sta->tx_filtered_count++;
1301
1302 /*
1303 * Clear the TX filter mask for this STA when sending the next
1304 * packet. If the STA went to power save mode, this will happen
1305 * happen when it wakes up for the next time.
1306 */
1307 sta->flags |= WLAN_STA_CLEAR_PS_FILT;
1308
1309 /*
1310 * This code races in the following way:
1311 *
1312 * (1) STA sends frame indicating it will go to sleep and does so
1313 * (2) hardware/firmware adds STA to filter list, passes frame up
1314 * (3) hardware/firmware processes TX fifo and suppresses a frame
1315 * (4) we get TX status before having processed the frame and
1316 * knowing that the STA has gone to sleep.
1317 *
1318 * This is actually quite unlikely even when both those events are
1319 * processed from interrupts coming in quickly after one another or
1320 * even at the same time because we queue both TX status events and
1321 * RX frames to be processed by a tasklet and process them in the
1322 * same order that they were received or TX status last. Hence, there
1323 * is no race as long as the frame RX is processed before the next TX
1324 * status, which drivers can ensure, see below.
1325 *
1326 * Note that this can only happen if the hardware or firmware can
1327 * actually add STAs to the filter list, if this is done by the
1328 * driver in response to set_tim() (which will only reduce the race
1329 * this whole filtering tries to solve, not completely solve it)
1330 * this situation cannot happen.
1331 *
1332 * To completely solve this race drivers need to make sure that they
1333 * (a) don't mix the irq-safe/not irq-safe TX status/RX processing
1334 * functions and
1335 * (b) always process RX events before TX status events if ordering
1336 * can be unknown, for example with different interrupt status
1337 * bits.
1338 */
1339 if (sta->flags & WLAN_STA_PS &&
1340 skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
1341 ieee80211_remove_tx_extra(local, sta->key, skb,
1342 &status->control);
1343 skb_queue_tail(&sta->tx_filtered, skb);
1344 return;
1345 }
1346
1347 if (!(sta->flags & WLAN_STA_PS) &&
1348 !(status->control.flags & IEEE80211_TXCTL_REQUEUE)) {
1349 /* Software retry the packet once */
1350 status->control.flags |= IEEE80211_TXCTL_REQUEUE;
1351 ieee80211_remove_tx_extra(local, sta->key, skb,
1352 &status->control);
1353 dev_queue_xmit(skb);
1354 return;
1355 }
1356
1357 if (net_ratelimit())
1358 printk(KERN_DEBUG "%s: dropped TX filtered frame, "
1359 "queue_len=%d PS=%d @%lu\n",
1360 wiphy_name(local->hw.wiphy),
1361 skb_queue_len(&sta->tx_filtered),
1362 !!(sta->flags & WLAN_STA_PS), jiffies);
1363 dev_kfree_skb(skb);
1364}
1365
825void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, 1366void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
826 struct ieee80211_tx_status *status) 1367 struct ieee80211_tx_status *status)
827{ 1368{
@@ -831,7 +1372,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
831 u16 frag, type; 1372 u16 frag, type;
832 struct ieee80211_tx_status_rtap_hdr *rthdr; 1373 struct ieee80211_tx_status_rtap_hdr *rthdr;
833 struct ieee80211_sub_if_data *sdata; 1374 struct ieee80211_sub_if_data *sdata;
834 int monitors; 1375 struct net_device *prev_dev = NULL;
835 1376
836 if (!status) { 1377 if (!status) {
837 printk(KERN_ERR 1378 printk(KERN_ERR
@@ -841,18 +1382,24 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
841 return; 1382 return;
842 } 1383 }
843 1384
1385 rcu_read_lock();
1386
844 if (status->excessive_retries) { 1387 if (status->excessive_retries) {
845 struct sta_info *sta; 1388 struct sta_info *sta;
846 sta = sta_info_get(local, hdr->addr1); 1389 sta = sta_info_get(local, hdr->addr1);
847 if (sta) { 1390 if (sta) {
848 if (sta->flags & WLAN_STA_PS) { 1391 if (sta->flags & WLAN_STA_PS) {
849 /* The STA is in power save mode, so assume 1392 /*
1393 * The STA is in power save mode, so assume
850 * that this TX packet failed because of that. 1394 * that this TX packet failed because of that.
851 */ 1395 */
852 status->excessive_retries = 0; 1396 status->excessive_retries = 0;
853 status->flags |= IEEE80211_TX_STATUS_TX_FILTERED; 1397 status->flags |= IEEE80211_TX_STATUS_TX_FILTERED;
1398 ieee80211_handle_filtered_frame(local, sta,
1399 skb, status);
1400 rcu_read_unlock();
1401 return;
854 } 1402 }
855 sta_info_put(sta);
856 } 1403 }
857 } 1404 }
858 1405
@@ -860,53 +1407,16 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
860 struct sta_info *sta; 1407 struct sta_info *sta;
861 sta = sta_info_get(local, hdr->addr1); 1408 sta = sta_info_get(local, hdr->addr1);
862 if (sta) { 1409 if (sta) {
863 sta->tx_filtered_count++; 1410 ieee80211_handle_filtered_frame(local, sta, skb,
864 1411 status);
865 /* Clear the TX filter mask for this STA when sending 1412 rcu_read_unlock();
866 * the next packet. If the STA went to power save mode,
867 * this will happen when it is waking up for the next
868 * time. */
869 sta->clear_dst_mask = 1;
870
871 /* TODO: Is the WLAN_STA_PS flag always set here or is
872 * the race between RX and TX status causing some
873 * packets to be filtered out before 80211.o gets an
874 * update for PS status? This seems to be the case, so
875 * no changes are likely to be needed. */
876 if (sta->flags & WLAN_STA_PS &&
877 skb_queue_len(&sta->tx_filtered) <
878 STA_MAX_TX_BUFFER) {
879 ieee80211_remove_tx_extra(local, sta->key,
880 skb,
881 &status->control);
882 skb_queue_tail(&sta->tx_filtered, skb);
883 } else if (!(sta->flags & WLAN_STA_PS) &&
884 !(status->control.flags & IEEE80211_TXCTL_REQUEUE)) {
885 /* Software retry the packet once */
886 status->control.flags |= IEEE80211_TXCTL_REQUEUE;
887 ieee80211_remove_tx_extra(local, sta->key,
888 skb,
889 &status->control);
890 dev_queue_xmit(skb);
891 } else {
892 if (net_ratelimit()) {
893 printk(KERN_DEBUG "%s: dropped TX "
894 "filtered frame queue_len=%d "
895 "PS=%d @%lu\n",
896 wiphy_name(local->hw.wiphy),
897 skb_queue_len(
898 &sta->tx_filtered),
899 !!(sta->flags & WLAN_STA_PS),
900 jiffies);
901 }
902 dev_kfree_skb(skb);
903 }
904 sta_info_put(sta);
905 return; 1413 return;
906 } 1414 }
907 } else 1415 } else
908 rate_control_tx_status(local->mdev, skb, status); 1416 rate_control_tx_status(local->mdev, skb, status);
909 1417
1418 rcu_read_unlock();
1419
910 ieee80211_led_tx(local, 0); 1420 ieee80211_led_tx(local, 0);
911 1421
912 /* SNMP counters 1422 /* SNMP counters
@@ -944,7 +1454,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
944 /* this was a transmitted frame, but now we want to reuse it */ 1454 /* this was a transmitted frame, but now we want to reuse it */
945 skb_orphan(skb); 1455 skb_orphan(skb);
946 1456
947 if (!local->monitors) { 1457 /*
1458 * This is a bit racy but we can avoid a lot of work
1459 * with this test...
1460 */
1461 if (!local->monitors && !local->cooked_mntrs) {
948 dev_kfree_skb(skb); 1462 dev_kfree_skb(skb);
949 return; 1463 return;
950 } 1464 }
@@ -978,51 +1492,44 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
978 1492
979 rthdr->data_retries = status->retry_count; 1493 rthdr->data_retries = status->retry_count;
980 1494
1495 /* XXX: is this sufficient for BPF? */
1496 skb_set_mac_header(skb, 0);
1497 skb->ip_summed = CHECKSUM_UNNECESSARY;
1498 skb->pkt_type = PACKET_OTHERHOST;
1499 skb->protocol = htons(ETH_P_802_2);
1500 memset(skb->cb, 0, sizeof(skb->cb));
1501
981 rcu_read_lock(); 1502 rcu_read_lock();
982 monitors = local->monitors;
983 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 1503 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
984 /*
985 * Using the monitors counter is possibly racy, but
986 * if the value is wrong we simply either clone the skb
987 * once too much or forget sending it to one monitor iface
988 * The latter case isn't nice but fixing the race is much
989 * more complicated.
990 */
991 if (!monitors || !skb)
992 goto out;
993
994 if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) { 1504 if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) {
995 if (!netif_running(sdata->dev)) 1505 if (!netif_running(sdata->dev))
996 continue; 1506 continue;
997 monitors--; 1507
998 if (monitors) 1508 if (prev_dev) {
999 skb2 = skb_clone(skb, GFP_ATOMIC); 1509 skb2 = skb_clone(skb, GFP_ATOMIC);
1000 else 1510 if (skb2) {
1001 skb2 = NULL; 1511 skb2->dev = prev_dev;
1002 skb->dev = sdata->dev; 1512 netif_rx(skb2);
1003 /* XXX: is this sufficient for BPF? */ 1513 }
1004 skb_set_mac_header(skb, 0); 1514 }
1005 skb->ip_summed = CHECKSUM_UNNECESSARY; 1515
1006 skb->pkt_type = PACKET_OTHERHOST; 1516 prev_dev = sdata->dev;
1007 skb->protocol = htons(ETH_P_802_2);
1008 memset(skb->cb, 0, sizeof(skb->cb));
1009 netif_rx(skb);
1010 skb = skb2;
1011 } 1517 }
1012 } 1518 }
1013 out: 1519 if (prev_dev) {
1520 skb->dev = prev_dev;
1521 netif_rx(skb);
1522 skb = NULL;
1523 }
1014 rcu_read_unlock(); 1524 rcu_read_unlock();
1015 if (skb) 1525 dev_kfree_skb(skb);
1016 dev_kfree_skb(skb);
1017} 1526}
1018EXPORT_SYMBOL(ieee80211_tx_status); 1527EXPORT_SYMBOL(ieee80211_tx_status);
1019 1528
1020struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, 1529struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
1021 const struct ieee80211_ops *ops) 1530 const struct ieee80211_ops *ops)
1022{ 1531{
1023 struct net_device *mdev;
1024 struct ieee80211_local *local; 1532 struct ieee80211_local *local;
1025 struct ieee80211_sub_if_data *sdata;
1026 int priv_size; 1533 int priv_size;
1027 struct wiphy *wiphy; 1534 struct wiphy *wiphy;
1028 1535
@@ -1068,25 +1575,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
1068 BUG_ON(!ops->configure_filter); 1575 BUG_ON(!ops->configure_filter);
1069 local->ops = ops; 1576 local->ops = ops;
1070 1577
1071 /* for now, mdev needs sub_if_data :/ */
1072 mdev = alloc_netdev(sizeof(struct ieee80211_sub_if_data),
1073 "wmaster%d", ether_setup);
1074 if (!mdev) {
1075 wiphy_free(wiphy);
1076 return NULL;
1077 }
1078
1079 sdata = IEEE80211_DEV_TO_SUB_IF(mdev);
1080 mdev->ieee80211_ptr = &sdata->wdev;
1081 sdata->wdev.wiphy = wiphy;
1082
1083 local->hw.queues = 1; /* default */ 1578 local->hw.queues = 1; /* default */
1084 1579
1085 local->mdev = mdev;
1086 local->rx_pre_handlers = ieee80211_rx_pre_handlers;
1087 local->rx_handlers = ieee80211_rx_handlers;
1088 local->tx_handlers = ieee80211_tx_handlers;
1089
1090 local->bridge_packets = 1; 1580 local->bridge_packets = 1;
1091 1581
1092 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; 1582 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
@@ -1095,33 +1585,14 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
1095 local->long_retry_limit = 4; 1585 local->long_retry_limit = 4;
1096 local->hw.conf.radio_enabled = 1; 1586 local->hw.conf.radio_enabled = 1;
1097 1587
1098 local->enabled_modes = ~0;
1099
1100 INIT_LIST_HEAD(&local->modes_list);
1101
1102 INIT_LIST_HEAD(&local->interfaces); 1588 INIT_LIST_HEAD(&local->interfaces);
1103 1589
1590 spin_lock_init(&local->key_lock);
1591
1104 INIT_DELAYED_WORK(&local->scan_work, ieee80211_sta_scan_work); 1592 INIT_DELAYED_WORK(&local->scan_work, ieee80211_sta_scan_work);
1105 ieee80211_rx_bss_list_init(mdev);
1106 1593
1107 sta_info_init(local); 1594 sta_info_init(local);
1108 1595
1109 mdev->hard_start_xmit = ieee80211_master_start_xmit;
1110 mdev->open = ieee80211_master_open;
1111 mdev->stop = ieee80211_master_stop;
1112 mdev->type = ARPHRD_IEEE80211;
1113 mdev->header_ops = &ieee80211_header_ops;
1114 mdev->set_multicast_list = ieee80211_master_set_multicast_list;
1115
1116 sdata->vif.type = IEEE80211_IF_TYPE_AP;
1117 sdata->dev = mdev;
1118 sdata->local = local;
1119 sdata->u.ap.force_unicast_rateidx = -1;
1120 sdata->u.ap.max_ratectrl_rateidx = -1;
1121 ieee80211_if_sdata_init(sdata);
1122 /* no RCU needed since we're still during init phase */
1123 list_add_tail(&sdata->list, &local->interfaces);
1124
1125 tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, 1596 tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
1126 (unsigned long)local); 1597 (unsigned long)local);
1127 tasklet_disable(&local->tx_pending_tasklet); 1598 tasklet_disable(&local->tx_pending_tasklet);
@@ -1143,11 +1614,63 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1143 struct ieee80211_local *local = hw_to_local(hw); 1614 struct ieee80211_local *local = hw_to_local(hw);
1144 const char *name; 1615 const char *name;
1145 int result; 1616 int result;
1617 enum ieee80211_band band;
1618 struct net_device *mdev;
1619 struct ieee80211_sub_if_data *sdata;
1620
1621 /*
1622 * generic code guarantees at least one band,
1623 * set this very early because much code assumes
1624 * that hw.conf.channel is assigned
1625 */
1626 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1627 struct ieee80211_supported_band *sband;
1628
1629 sband = local->hw.wiphy->bands[band];
1630 if (sband) {
1631 /* init channel we're on */
1632 local->hw.conf.channel =
1633 local->oper_channel =
1634 local->scan_channel = &sband->channels[0];
1635 break;
1636 }
1637 }
1146 1638
1147 result = wiphy_register(local->hw.wiphy); 1639 result = wiphy_register(local->hw.wiphy);
1148 if (result < 0) 1640 if (result < 0)
1149 return result; 1641 return result;
1150 1642
1643 /* for now, mdev needs sub_if_data :/ */
1644 mdev = alloc_netdev(sizeof(struct ieee80211_sub_if_data),
1645 "wmaster%d", ether_setup);
1646 if (!mdev)
1647 goto fail_mdev_alloc;
1648
1649 sdata = IEEE80211_DEV_TO_SUB_IF(mdev);
1650 mdev->ieee80211_ptr = &sdata->wdev;
1651 sdata->wdev.wiphy = local->hw.wiphy;
1652
1653 local->mdev = mdev;
1654
1655 ieee80211_rx_bss_list_init(mdev);
1656
1657 mdev->hard_start_xmit = ieee80211_master_start_xmit;
1658 mdev->open = ieee80211_master_open;
1659 mdev->stop = ieee80211_master_stop;
1660 mdev->type = ARPHRD_IEEE80211;
1661 mdev->header_ops = &ieee80211_header_ops;
1662 mdev->set_multicast_list = ieee80211_master_set_multicast_list;
1663
1664 sdata->vif.type = IEEE80211_IF_TYPE_AP;
1665 sdata->dev = mdev;
1666 sdata->local = local;
1667 sdata->u.ap.force_unicast_rateidx = -1;
1668 sdata->u.ap.max_ratectrl_rateidx = -1;
1669 ieee80211_if_sdata_init(sdata);
1670
1671 /* no RCU needed since we're still during init phase */
1672 list_add_tail(&sdata->list, &local->interfaces);
1673
1151 name = wiphy_dev(local->hw.wiphy)->driver->name; 1674 name = wiphy_dev(local->hw.wiphy)->driver->name;
1152 local->hw.workqueue = create_singlethread_workqueue(name); 1675 local->hw.workqueue = create_singlethread_workqueue(name);
1153 if (!local->hw.workqueue) { 1676 if (!local->hw.workqueue) {
@@ -1215,7 +1738,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1215 1738
1216 /* add one default STA interface */ 1739 /* add one default STA interface */
1217 result = ieee80211_if_add(local->mdev, "wlan%d", NULL, 1740 result = ieee80211_if_add(local->mdev, "wlan%d", NULL,
1218 IEEE80211_IF_TYPE_STA); 1741 IEEE80211_IF_TYPE_STA, NULL);
1219 if (result) 1742 if (result)
1220 printk(KERN_WARNING "%s: Failed to add default virtual iface\n", 1743 printk(KERN_WARNING "%s: Failed to add default virtual iface\n",
1221 wiphy_name(local->hw.wiphy)); 1744 wiphy_name(local->hw.wiphy));
@@ -1239,49 +1762,18 @@ fail_sta_info:
1239 debugfs_hw_del(local); 1762 debugfs_hw_del(local);
1240 destroy_workqueue(local->hw.workqueue); 1763 destroy_workqueue(local->hw.workqueue);
1241fail_workqueue: 1764fail_workqueue:
1765 ieee80211_if_free(local->mdev);
1766 local->mdev = NULL;
1767fail_mdev_alloc:
1242 wiphy_unregister(local->hw.wiphy); 1768 wiphy_unregister(local->hw.wiphy);
1243 return result; 1769 return result;
1244} 1770}
1245EXPORT_SYMBOL(ieee80211_register_hw); 1771EXPORT_SYMBOL(ieee80211_register_hw);
1246 1772
1247int ieee80211_register_hwmode(struct ieee80211_hw *hw,
1248 struct ieee80211_hw_mode *mode)
1249{
1250 struct ieee80211_local *local = hw_to_local(hw);
1251 struct ieee80211_rate *rate;
1252 int i;
1253
1254 INIT_LIST_HEAD(&mode->list);
1255 list_add_tail(&mode->list, &local->modes_list);
1256
1257 local->hw_modes |= (1 << mode->mode);
1258 for (i = 0; i < mode->num_rates; i++) {
1259 rate = &(mode->rates[i]);
1260 rate->rate_inv = CHAN_UTIL_RATE_LCM / rate->rate;
1261 }
1262 ieee80211_prepare_rates(local, mode);
1263
1264 if (!local->oper_hw_mode) {
1265 /* Default to this mode */
1266 local->hw.conf.phymode = mode->mode;
1267 local->oper_hw_mode = local->scan_hw_mode = mode;
1268 local->oper_channel = local->scan_channel = &mode->channels[0];
1269 local->hw.conf.mode = local->oper_hw_mode;
1270 local->hw.conf.chan = local->oper_channel;
1271 }
1272
1273 if (!(hw->flags & IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED))
1274 ieee80211_set_default_regdomain(mode);
1275
1276 return 0;
1277}
1278EXPORT_SYMBOL(ieee80211_register_hwmode);
1279
1280void ieee80211_unregister_hw(struct ieee80211_hw *hw) 1773void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1281{ 1774{
1282 struct ieee80211_local *local = hw_to_local(hw); 1775 struct ieee80211_local *local = hw_to_local(hw);
1283 struct ieee80211_sub_if_data *sdata, *tmp; 1776 struct ieee80211_sub_if_data *sdata, *tmp;
1284 int i;
1285 1777
1286 tasklet_kill(&local->tx_pending_tasklet); 1778 tasklet_kill(&local->tx_pending_tasklet);
1287 tasklet_kill(&local->tasklet); 1779 tasklet_kill(&local->tasklet);
@@ -1322,11 +1814,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1322 rate_control_deinitialize(local); 1814 rate_control_deinitialize(local);
1323 debugfs_hw_del(local); 1815 debugfs_hw_del(local);
1324 1816
1325 for (i = 0; i < NUM_IEEE80211_MODES; i++) {
1326 kfree(local->supp_rates[i]);
1327 kfree(local->basic_rates[i]);
1328 }
1329
1330 if (skb_queue_len(&local->skb_queue) 1817 if (skb_queue_len(&local->skb_queue)
1331 || skb_queue_len(&local->skb_queue_unreliable)) 1818 || skb_queue_len(&local->skb_queue_unreliable))
1332 printk(KERN_WARNING "%s: skb_queue not empty\n", 1819 printk(KERN_WARNING "%s: skb_queue not empty\n",
@@ -1338,6 +1825,8 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1338 wiphy_unregister(local->hw.wiphy); 1825 wiphy_unregister(local->hw.wiphy);
1339 ieee80211_wep_free(local); 1826 ieee80211_wep_free(local);
1340 ieee80211_led_exit(local); 1827 ieee80211_led_exit(local);
1828 ieee80211_if_free(local->mdev);
1829 local->mdev = NULL;
1341} 1830}
1342EXPORT_SYMBOL(ieee80211_unregister_hw); 1831EXPORT_SYMBOL(ieee80211_unregister_hw);
1343 1832
@@ -1345,7 +1834,6 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
1345{ 1834{
1346 struct ieee80211_local *local = hw_to_local(hw); 1835 struct ieee80211_local *local = hw_to_local(hw);
1347 1836
1348 ieee80211_if_free(local->mdev);
1349 wiphy_free(local->hw.wiphy); 1837 wiphy_free(local->hw.wiphy);
1350} 1838}
1351EXPORT_SYMBOL(ieee80211_free_hw); 1839EXPORT_SYMBOL(ieee80211_free_hw);
@@ -1357,13 +1845,9 @@ static int __init ieee80211_init(void)
1357 1845
1358 BUILD_BUG_ON(sizeof(struct ieee80211_tx_packet_data) > sizeof(skb->cb)); 1846 BUILD_BUG_ON(sizeof(struct ieee80211_tx_packet_data) > sizeof(skb->cb));
1359 1847
1360 ret = rc80211_simple_init();
1361 if (ret)
1362 goto out;
1363
1364 ret = rc80211_pid_init(); 1848 ret = rc80211_pid_init();
1365 if (ret) 1849 if (ret)
1366 goto out_cleanup_simple; 1850 goto out;
1367 1851
1368 ret = ieee80211_wme_register(); 1852 ret = ieee80211_wme_register();
1369 if (ret) { 1853 if (ret) {
@@ -1373,23 +1857,28 @@ static int __init ieee80211_init(void)
1373 } 1857 }
1374 1858
1375 ieee80211_debugfs_netdev_init(); 1859 ieee80211_debugfs_netdev_init();
1376 ieee80211_regdomain_init();
1377 1860
1378 return 0; 1861 return 0;
1379 1862
1380 out_cleanup_pid: 1863 out_cleanup_pid:
1381 rc80211_pid_exit(); 1864 rc80211_pid_exit();
1382 out_cleanup_simple:
1383 rc80211_simple_exit();
1384 out: 1865 out:
1385 return ret; 1866 return ret;
1386} 1867}
1387 1868
1388static void __exit ieee80211_exit(void) 1869static void __exit ieee80211_exit(void)
1389{ 1870{
1390 rc80211_simple_exit();
1391 rc80211_pid_exit(); 1871 rc80211_pid_exit();
1392 1872
1873 /*
1874 * For key todo, it'll be empty by now but the work
1875 * might still be scheduled.
1876 */
1877 flush_scheduled_work();
1878
1879 if (mesh_allocated)
1880 ieee80211s_stop();
1881
1393 ieee80211_wme_unregister(); 1882 ieee80211_wme_unregister();
1394 ieee80211_debugfs_netdev_exit(); 1883 ieee80211_debugfs_netdev_exit();
1395} 1884}
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
new file mode 100644
index 000000000000..594a3356a508
--- /dev/null
+++ b/net/mac80211/mesh.c
@@ -0,0 +1,449 @@
1/*
2 * Copyright (c) 2008 open80211s Ltd.
3 * Authors: Luis Carlos Cobo <luisca@cozybit.com>
4 * Javier Cardona <javier@cozybit.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include "ieee80211_i.h"
12#include "mesh.h"
13
14#define PP_OFFSET 1 /* Path Selection Protocol */
15#define PM_OFFSET 5 /* Path Selection Metric */
16#define CC_OFFSET 9 /* Congestion Control Mode */
17#define CAPAB_OFFSET 17
18#define ACCEPT_PLINKS 0x80
19
20int mesh_allocated;
21static struct kmem_cache *rm_cache;
22
23void ieee80211s_init(void)
24{
25 mesh_pathtbl_init();
26 mesh_allocated = 1;
27 rm_cache = kmem_cache_create("mesh_rmc", sizeof(struct rmc_entry),
28 0, 0, NULL);
29}
30
31void ieee80211s_stop(void)
32{
33 mesh_pathtbl_unregister();
34 kmem_cache_destroy(rm_cache);
35}
36
37/**
38 * mesh_matches_local - check if the config of a mesh point matches ours
39 *
40 * @ie: information elements of a management frame from the mesh peer
41 * @dev: local mesh interface
42 *
43 * This function checks if the mesh configuration of a mesh point matches the
44 * local mesh configuration, i.e. if both nodes belong to the same mesh network.
45 */
46bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev)
47{
48 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
49 struct ieee80211_if_sta *sta = &sdata->u.sta;
50
51 /*
52 * As support for each feature is added, check for matching
53 * - On mesh config capabilities
54 * - Power Save Support En
55 * - Sync support enabled
56 * - Sync support active
57 * - Sync support required from peer
58 * - MDA enabled
59 * - Power management control on fc
60 */
61 if (sta->mesh_id_len == ie->mesh_id_len &&
62 memcmp(sta->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
63 memcmp(sta->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 &&
64 memcmp(sta->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 &&
65 memcmp(sta->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0)
66 return true;
67
68 return false;
69}
70
71/**
72 * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links
73 *
74 * @ie: information elements of a management frame from the mesh peer
75 * @dev: local mesh interface
76 */
77bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie,
78 struct net_device *dev)
79{
80 return (*(ie->mesh_config + CAPAB_OFFSET) & ACCEPT_PLINKS) != 0;
81}
82
83/**
84 * mesh_accept_plinks_update: update accepting_plink in local mesh beacons
85 *
86 * @sdata: mesh interface in which mesh beacons are going to be updated
87 */
88void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
89{
90 bool free_plinks;
91
92 /* In case mesh_plink_free_count > 0 and mesh_plinktbl_capacity == 0,
93 * the mesh interface might be able to establish plinks with peers that
94 * are already on the table but are not on PLINK_ESTAB state. However,
95 * in general the mesh interface is not accepting peer link requests
96 * from new peers, and that must be reflected in the beacon
97 */
98 free_plinks = mesh_plink_availables(sdata);
99
100 if (free_plinks != sdata->u.sta.accepting_plinks)
101 ieee80211_sta_timer((unsigned long) sdata);
102}
103
104void mesh_ids_set_default(struct ieee80211_if_sta *sta)
105{
106 u8 def_id[4] = {0x00, 0x0F, 0xAC, 0xff};
107
108 memcpy(sta->mesh_pp_id, def_id, 4);
109 memcpy(sta->mesh_pm_id, def_id, 4);
110 memcpy(sta->mesh_cc_id, def_id, 4);
111}
112
113int mesh_rmc_init(struct net_device *dev)
114{
115 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
116 int i;
117
118 sdata->u.sta.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL);
119 if (!sdata->u.sta.rmc)
120 return -ENOMEM;
121 sdata->u.sta.rmc->idx_mask = RMC_BUCKETS - 1;
122 for (i = 0; i < RMC_BUCKETS; i++)
123 INIT_LIST_HEAD(&sdata->u.sta.rmc->bucket[i].list);
124 return 0;
125}
126
127void mesh_rmc_free(struct net_device *dev)
128{
129 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
130 struct mesh_rmc *rmc = sdata->u.sta.rmc;
131 struct rmc_entry *p, *n;
132 int i;
133
134 if (!sdata->u.sta.rmc)
135 return;
136
137 for (i = 0; i < RMC_BUCKETS; i++)
138 list_for_each_entry_safe(p, n, &rmc->bucket[i].list, list) {
139 list_del(&p->list);
140 kmem_cache_free(rm_cache, p);
141 }
142
143 kfree(rmc);
144 sdata->u.sta.rmc = NULL;
145}
146
147/**
148 * mesh_rmc_check - Check frame in recent multicast cache and add if absent.
149 *
150 * @sa: source address
151 * @mesh_hdr: mesh_header
152 *
153 * Returns: 0 if the frame is not in the cache, nonzero otherwise.
154 *
155 * Checks using the source address and the mesh sequence number if we have
156 * received this frame lately. If the frame is not in the cache, it is added to
157 * it.
158 */
159int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
160 struct net_device *dev)
161{
162 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
163 struct mesh_rmc *rmc = sdata->u.sta.rmc;
164 u32 seqnum = 0;
165 int entries = 0;
166 u8 idx;
167 struct rmc_entry *p, *n;
168
169 /* Don't care about endianness since only match matters */
170 memcpy(&seqnum, mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum));
171 idx = mesh_hdr->seqnum[0] & rmc->idx_mask;
172 list_for_each_entry_safe(p, n, &rmc->bucket[idx].list, list) {
173 ++entries;
174 if (time_after(jiffies, p->exp_time) ||
175 (entries == RMC_QUEUE_MAX_LEN)) {
176 list_del(&p->list);
177 kmem_cache_free(rm_cache, p);
178 --entries;
179 } else if ((seqnum == p->seqnum)
180 && (memcmp(sa, p->sa, ETH_ALEN) == 0))
181 return -1;
182 }
183
184 p = kmem_cache_alloc(rm_cache, GFP_ATOMIC);
185 if (!p) {
186 printk(KERN_DEBUG "o11s: could not allocate RMC entry\n");
187 return 0;
188 }
189 p->seqnum = seqnum;
190 p->exp_time = jiffies + RMC_TIMEOUT;
191 memcpy(p->sa, sa, ETH_ALEN);
192 list_add(&p->list, &rmc->bucket[idx].list);
193 return 0;
194}
195
196void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev)
197{
198 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
199 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
200 struct ieee80211_supported_band *sband;
201 u8 *pos;
202 int len, i, rate;
203
204 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
205 len = sband->n_bitrates;
206 if (len > 8)
207 len = 8;
208 pos = skb_put(skb, len + 2);
209 *pos++ = WLAN_EID_SUPP_RATES;
210 *pos++ = len;
211 for (i = 0; i < len; i++) {
212 rate = sband->bitrates[i].bitrate;
213 *pos++ = (u8) (rate / 5);
214 }
215
216 if (sband->n_bitrates > len) {
217 pos = skb_put(skb, sband->n_bitrates - len + 2);
218 *pos++ = WLAN_EID_EXT_SUPP_RATES;
219 *pos++ = sband->n_bitrates - len;
220 for (i = len; i < sband->n_bitrates; i++) {
221 rate = sband->bitrates[i].bitrate;
222 *pos++ = (u8) (rate / 5);
223 }
224 }
225
226 pos = skb_put(skb, 2 + sdata->u.sta.mesh_id_len);
227 *pos++ = WLAN_EID_MESH_ID;
228 *pos++ = sdata->u.sta.mesh_id_len;
229 if (sdata->u.sta.mesh_id_len)
230 memcpy(pos, sdata->u.sta.mesh_id, sdata->u.sta.mesh_id_len);
231
232 pos = skb_put(skb, 21);
233 *pos++ = WLAN_EID_MESH_CONFIG;
234 *pos++ = MESH_CFG_LEN;
235 /* Version */
236 *pos++ = 1;
237
238 /* Active path selection protocol ID */
239 memcpy(pos, sdata->u.sta.mesh_pp_id, 4);
240 pos += 4;
241
242 /* Active path selection metric ID */
243 memcpy(pos, sdata->u.sta.mesh_pm_id, 4);
244 pos += 4;
245
246 /* Congestion control mode identifier */
247 memcpy(pos, sdata->u.sta.mesh_cc_id, 4);
248 pos += 4;
249
250 /* Channel precedence:
251 * Not running simple channel unification protocol
252 */
253 memset(pos, 0x00, 4);
254 pos += 4;
255
256 /* Mesh capability */
257 sdata->u.sta.accepting_plinks = mesh_plink_availables(sdata);
258 *pos++ = sdata->u.sta.accepting_plinks ? ACCEPT_PLINKS : 0x00;
259 *pos++ = 0x00;
260
261 return;
262}
263
264u32 mesh_table_hash(u8 *addr, struct net_device *dev, struct mesh_table *tbl)
265{
266 /* Use last four bytes of hw addr and interface index as hash index */
267 return jhash_2words(*(u32 *)(addr+2), dev->ifindex, tbl->hash_rnd)
268 & tbl->hash_mask;
269}
270
271u8 mesh_id_hash(u8 *mesh_id, int mesh_id_len)
272{
273 if (!mesh_id_len)
274 return 1;
275 else if (mesh_id_len == 1)
276 return (u8) mesh_id[0];
277 else
278 return (u8) (mesh_id[0] + 2 * mesh_id[1]);
279}
280
281struct mesh_table *mesh_table_alloc(int size_order)
282{
283 int i;
284 struct mesh_table *newtbl;
285
286 newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL);
287 if (!newtbl)
288 return NULL;
289
290 newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
291 (1 << size_order), GFP_KERNEL);
292
293 if (!newtbl->hash_buckets) {
294 kfree(newtbl);
295 return NULL;
296 }
297
298 newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
299 (1 << size_order), GFP_KERNEL);
300 if (!newtbl->hashwlock) {
301 kfree(newtbl->hash_buckets);
302 kfree(newtbl);
303 return NULL;
304 }
305
306 newtbl->size_order = size_order;
307 newtbl->hash_mask = (1 << size_order) - 1;
308 atomic_set(&newtbl->entries, 0);
309 get_random_bytes(&newtbl->hash_rnd,
310 sizeof(newtbl->hash_rnd));
311 for (i = 0; i <= newtbl->hash_mask; i++)
312 spin_lock_init(&newtbl->hashwlock[i]);
313
314 return newtbl;
315}
316
317void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
318{
319 struct hlist_head *mesh_hash;
320 struct hlist_node *p, *q;
321 int i;
322
323 mesh_hash = tbl->hash_buckets;
324 for (i = 0; i <= tbl->hash_mask; i++) {
325 spin_lock(&tbl->hashwlock[i]);
326 hlist_for_each_safe(p, q, &mesh_hash[i]) {
327 tbl->free_node(p, free_leafs);
328 atomic_dec(&tbl->entries);
329 }
330 spin_unlock(&tbl->hashwlock[i]);
331 }
332 kfree(tbl->hash_buckets);
333 kfree(tbl->hashwlock);
334 kfree(tbl);
335}
336
337static void ieee80211_mesh_path_timer(unsigned long data)
338{
339 struct ieee80211_sub_if_data *sdata =
340 (struct ieee80211_sub_if_data *) data;
341 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
342 struct ieee80211_local *local = wdev_priv(&sdata->wdev);
343
344 queue_work(local->hw.workqueue, &ifsta->work);
345}
346
347struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
348{
349 struct mesh_table *newtbl;
350 struct hlist_head *oldhash;
351 struct hlist_node *p;
352 int err = 0;
353 int i;
354
355 if (atomic_read(&tbl->entries)
356 < tbl->mean_chain_len * (tbl->hash_mask + 1)) {
357 err = -EPERM;
358 goto endgrow;
359 }
360
361 newtbl = mesh_table_alloc(tbl->size_order + 1);
362 if (!newtbl) {
363 err = -ENOMEM;
364 goto endgrow;
365 }
366
367 newtbl->free_node = tbl->free_node;
368 newtbl->mean_chain_len = tbl->mean_chain_len;
369 newtbl->copy_node = tbl->copy_node;
370 atomic_set(&newtbl->entries, atomic_read(&tbl->entries));
371
372 oldhash = tbl->hash_buckets;
373 for (i = 0; i <= tbl->hash_mask; i++)
374 hlist_for_each(p, &oldhash[i])
375 tbl->copy_node(p, newtbl);
376
377endgrow:
378 if (err)
379 return NULL;
380 else
381 return newtbl;
382}
383
384/**
385 * ieee80211_new_mesh_header - create a new mesh header
386 * @meshhdr: uninitialized mesh header
387 * @sdata: mesh interface to be used
388 *
389 * Return the header length.
390 */
391int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
392 struct ieee80211_sub_if_data *sdata)
393{
394 meshhdr->flags = 0;
395 meshhdr->ttl = sdata->u.sta.mshcfg.dot11MeshTTL;
396
397 meshhdr->seqnum[0] = sdata->u.sta.mesh_seqnum[0]++;
398 meshhdr->seqnum[1] = sdata->u.sta.mesh_seqnum[1];
399 meshhdr->seqnum[2] = sdata->u.sta.mesh_seqnum[2];
400
401 if (sdata->u.sta.mesh_seqnum[0] == 0) {
402 sdata->u.sta.mesh_seqnum[1]++;
403 if (sdata->u.sta.mesh_seqnum[1] == 0)
404 sdata->u.sta.mesh_seqnum[2]++;
405 }
406
407 return 5;
408}
409
410void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
411{
412 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
413
414 ifsta->mshcfg.dot11MeshRetryTimeout = MESH_RET_T;
415 ifsta->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T;
416 ifsta->mshcfg.dot11MeshHoldingTimeout = MESH_HOLD_T;
417 ifsta->mshcfg.dot11MeshMaxRetries = MESH_MAX_RETR;
418 ifsta->mshcfg.dot11MeshTTL = MESH_TTL;
419 ifsta->mshcfg.auto_open_plinks = true;
420 ifsta->mshcfg.dot11MeshMaxPeerLinks =
421 MESH_MAX_ESTAB_PLINKS;
422 ifsta->mshcfg.dot11MeshHWMPactivePathTimeout =
423 MESH_PATH_TIMEOUT;
424 ifsta->mshcfg.dot11MeshHWMPpreqMinInterval =
425 MESH_PREQ_MIN_INT;
426 ifsta->mshcfg.dot11MeshHWMPnetDiameterTraversalTime =
427 MESH_DIAM_TRAVERSAL_TIME;
428 ifsta->mshcfg.dot11MeshHWMPmaxPREQretries =
429 MESH_MAX_PREQ_RETRIES;
430 ifsta->mshcfg.path_refresh_time =
431 MESH_PATH_REFRESH_TIME;
432 ifsta->mshcfg.min_discovery_timeout =
433 MESH_MIN_DISCOVERY_TIMEOUT;
434 ifsta->accepting_plinks = true;
435 ifsta->preq_id = 0;
436 ifsta->dsn = 0;
437 atomic_set(&ifsta->mpaths, 0);
438 mesh_rmc_init(sdata->dev);
439 ifsta->last_preq = jiffies;
440 /* Allocate all mesh structures when creating the first mesh interface. */
441 if (!mesh_allocated)
442 ieee80211s_init();
443 mesh_ids_set_default(ifsta);
444 setup_timer(&ifsta->mesh_path_timer,
445 ieee80211_mesh_path_timer,
446 (unsigned long) sdata);
447 INIT_LIST_HEAD(&ifsta->preq_queue.list);
448 spin_lock_init(&ifsta->mesh_preq_queue_lock);
449}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
new file mode 100644
index 000000000000..742003d3a841
--- /dev/null
+++ b/net/mac80211/mesh.h
@@ -0,0 +1,290 @@
1/*
2 * Copyright (c) 2008 open80211s Ltd.
3 * Authors: Luis Carlos Cobo <luisca@cozybit.com>
4 * Javier Cardona <javier@cozybit.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef IEEE80211S_H
12#define IEEE80211S_H
13
14#include <linux/types.h>
15#include <linux/jhash.h>
16#include "ieee80211_i.h"
17
18
19/* Data structures */
20
/**
 * enum mesh_path_flags - mac80211 mesh path flags
 *
 * @MESH_PATH_ACTIVE: the mesh path can be used for forwarding
 * @MESH_PATH_RESOLVING: the discovery process is running for this mesh path
 * @MESH_PATH_DSN_VALID: the mesh path contains a valid destination sequence
 *	number
 * @MESH_PATH_FIXED: the mesh path has been manually set and should not be
 *	modified
 * @MESH_PATH_RESOLVED: the mesh path discovery process succeeded
 *
 * MESH_PATH_RESOLVING and MESH_PATH_RESOLVED are used by the mesh path timer
 * to decide when to stop or cancel the mesh path discovery.
 */
enum mesh_path_flags {
	MESH_PATH_ACTIVE = BIT(0),
	MESH_PATH_RESOLVING = BIT(1),
	MESH_PATH_DSN_VALID = BIT(2),
	MESH_PATH_FIXED = BIT(3),
	MESH_PATH_RESOLVED = BIT(4),
};
44
/**
 * struct mesh_path - mac80211 mesh path structure
 *
 * @dst: mesh path destination mac address
 * @dev: mesh path device
 * @next_hop: mesh neighbor to which frames for this destination will be
 *	forwarded
 * @timer: mesh path discovery timer
 * @frame_queue: pending queue for frames sent to this destination while the
 *	path is unresolved
 * @rcu: used to defer freeing the structure until all RCU readers are done
 * @dsn: destination sequence number of the destination
 * @metric: current metric to this destination
 * @hop_count: hops to destination
 * @exp_time: in jiffies, when the path will expire or when it expired
 * @discovery_timeout: timeout (lapse in jiffies) used for the last discovery
 *	retry
 * @discovery_retries: number of discovery retries
 * @flags: mesh path flags, as specified on &enum mesh_path_flags
 * @state_lock: mesh path state lock
 *
 *
 * The combination of dst and dev is unique in the mesh path table. Since the
 * next_hop STA is only protected by RCU as well, deleting the STA must also
 * remove/substitute the mesh_path structure and wait until that is no longer
 * reachable before destroying the STA completely.
 */
struct mesh_path {
	u8 dst[ETH_ALEN];
	struct net_device *dev;
	struct sta_info *next_hop;
	struct timer_list timer;
	struct sk_buff_head frame_queue;
	struct rcu_head rcu;
	u32 dsn;
	u32 metric;
	u8 hop_count;
	unsigned long exp_time;
	u32 discovery_timeout;
	u8 discovery_retries;
	enum mesh_path_flags flags;
	spinlock_t state_lock;
};
87
/**
 * struct mesh_table - mesh hash table
 *
 * @hash_buckets: array of hash buckets of the table
 * @hashwlock: array of locks to protect write operations, one per bucket
 * @hash_mask: 2^size_order - 1, used to compute hash idx
 * @hash_rnd: random value used for hash computations
 * @entries: number of entries in the table
 * @free_node: function to free nodes of the table
 * @copy_node: function to copy nodes of the table
 * @size_order: determines size of the table, there will be 2^size_order hash
 *	buckets
 * @mean_chain_len: maximum average length for the hash buckets' list, if it is
 *	reached, the table will grow
 */
struct mesh_table {
	/* Number of buckets will be 2^N */
	struct hlist_head *hash_buckets;
	spinlock_t *hashwlock;		/* One per bucket, for add/del */
	unsigned int hash_mask;		/* (2^size_order) - 1 */
	__u32 hash_rnd;			/* Used for hash generation */
	atomic_t entries;		/* Up to MAX_MESH_NEIGHBOURS */
	void (*free_node) (struct hlist_node *p, bool free_leafs);
	void (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
	int size_order;
	int mean_chain_len;
};
115
116/* Recent multicast cache */
117/* RMC_BUCKETS must be a power of 2, maximum 256 */
118#define RMC_BUCKETS 256
119#define RMC_QUEUE_MAX_LEN 4
120#define RMC_TIMEOUT (3 * HZ)
121
/**
 * struct rmc_entry - entry in the Recent Multicast Cache
 *
 * @list: membership in the per-bucket list of cached entries
 * @seqnum: mesh sequence number of the frame
 * @exp_time: expiration time of the entry, in jiffies
 * @sa: source address of the frame
 *
 * The Recent Multicast Cache keeps track of the latest multicast frames that
 * have been received by a mesh interface and discards received multicast frames
 * that are found in the cache.
 */
struct rmc_entry {
	struct list_head list;
	u32 seqnum;
	unsigned long exp_time;
	u8 sa[ETH_ALEN];
};
139
/**
 * struct mesh_rmc - Recent Multicast Cache
 *
 * @bucket: hash buckets; each heads a list of &struct rmc_entry
 * @idx_mask: mask used to select a bucket (RMC_BUCKETS is a power of 2)
 */
struct mesh_rmc {
	struct rmc_entry bucket[RMC_BUCKETS];
	u8 idx_mask;
};
144
145
146/* Mesh IEs constants */
147#define MESH_CFG_LEN 19
148
/*
 * MESH_CFG_CMP_LEN includes:
 *	- Active path selection protocol ID.
 *	- Active path selection metric ID.
 *	- Congestion control mode identifier.
 *	- Channel precedence.
 * Does not include mesh capabilities, which may vary across nodes in the same
 * mesh
 */
158#define MESH_CFG_CMP_LEN 17
159
160/* Default values, timeouts in ms */
161#define MESH_TTL 5
162#define MESH_MAX_RETR 3
163#define MESH_RET_T 100
164#define MESH_CONF_T 100
165#define MESH_HOLD_T 100
166
167#define MESH_PATH_TIMEOUT 5000
168/* Minimum interval between two consecutive PREQs originated by the same
169 * interface
170 */
171#define MESH_PREQ_MIN_INT 10
172#define MESH_DIAM_TRAVERSAL_TIME 50
173/* Paths will be refreshed if they are closer than PATH_REFRESH_TIME to their
174 * expiration
175 */
176#define MESH_PATH_REFRESH_TIME 1000
177#define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME)
178
179#define MESH_MAX_PREQ_RETRIES 4
180#define MESH_PATH_EXPIRE (600 * HZ)
181
182/* Default maximum number of established plinks per interface */
183#define MESH_MAX_ESTAB_PLINKS 32
184
185/* Default maximum number of plinks per interface */
186#define MESH_MAX_PLINKS 256
187
188/* Maximum number of paths per interface */
189#define MESH_MAX_MPATHS 1024
190
191/* Pending ANA approval */
192#define PLINK_CATEGORY 30
193#define MESH_PATH_SEL_CATEGORY 32
194
195/* Mesh Header Flags */
196#define IEEE80211S_FLAGS_AE 0x3
197
198/* Public interfaces */
199/* Various */
200u8 mesh_id_hash(u8 *mesh_id, int mesh_id_len);
201int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
202int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
203 struct ieee80211_sub_if_data *sdata);
204int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr,
205 struct net_device *dev);
206bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev);
207void mesh_ids_set_default(struct ieee80211_if_sta *sta);
208void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev);
209void mesh_rmc_free(struct net_device *dev);
210int mesh_rmc_init(struct net_device *dev);
211void ieee80211s_init(void);
212void ieee80211s_stop(void);
213void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
214
215/* Mesh paths */
216int mesh_nexthop_lookup(u8 *next_hop, struct sk_buff *skb,
217 struct net_device *dev);
218void mesh_path_start_discovery(struct net_device *dev);
219struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev);
220struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev);
221void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop);
222void mesh_path_expire(struct net_device *dev);
223void mesh_path_flush(struct net_device *dev);
224void mesh_rx_path_sel_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
225 size_t len);
226int mesh_path_add(u8 *dst, struct net_device *dev);
227/* Mesh plinks */
228void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct net_device *dev,
229 bool add);
230bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie,
231 struct net_device *dev);
232void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
233void mesh_plink_broken(struct sta_info *sta);
234void mesh_plink_deactivate(struct sta_info *sta);
235int mesh_plink_open(struct sta_info *sta);
236int mesh_plink_close(struct sta_info *sta);
237void mesh_plink_block(struct sta_info *sta);
238void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
239 size_t len, struct ieee80211_rx_status *rx_status);
240
241/* Private interfaces */
242/* Mesh tables */
243struct mesh_table *mesh_table_alloc(int size_order);
244void mesh_table_free(struct mesh_table *tbl, bool free_leafs);
245struct mesh_table *mesh_table_grow(struct mesh_table *tbl);
246u32 mesh_table_hash(u8 *addr, struct net_device *dev, struct mesh_table *tbl);
247/* Mesh paths */
248int mesh_path_error_tx(u8 *dest, __le32 dest_dsn, u8 *ra,
249 struct net_device *dev);
250void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
251void mesh_path_flush_pending(struct mesh_path *mpath);
252void mesh_path_tx_pending(struct mesh_path *mpath);
253int mesh_pathtbl_init(void);
254void mesh_pathtbl_unregister(void);
255int mesh_path_del(u8 *addr, struct net_device *dev);
256void mesh_path_timer(unsigned long data);
257void mesh_path_flush_by_nexthop(struct sta_info *sta);
258void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev);
259
260#ifdef CONFIG_MAC80211_MESH
261extern int mesh_allocated;
262
263static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata)
264{
265 return sdata->u.sta.mshcfg.dot11MeshMaxPeerLinks -
266 atomic_read(&sdata->u.sta.mshstats.estab_plinks);
267}
268
269static inline bool mesh_plink_availables(struct ieee80211_sub_if_data *sdata)
270{
271 return (min_t(long, mesh_plink_free_count(sdata),
272 MESH_MAX_PLINKS - sdata->local->num_sta)) > 0;
273}
274
275static inline void mesh_path_activate(struct mesh_path *mpath)
276{
277 mpath->flags |= MESH_PATH_ACTIVE | MESH_PATH_RESOLVED;
278}
279
280#define for_each_mesh_entry(x, p, node, i) \
281 for (i = 0; i <= x->hash_mask; i++) \
282 hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list)
283
284#else
285#define mesh_allocated 0
286#endif
287
/* PREQ flags for this frame are stashed in skb->cb; parenthesize the macro
 * argument so expression arguments expand correctly.
 */
#define MESH_PREQ(skb) ((skb)->cb + 30)
289
290#endif /* IEEE80211S_H */
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
new file mode 100644
index 000000000000..02de8f1522a3
--- /dev/null
+++ b/net/mac80211/mesh_hwmp.c
@@ -0,0 +1,855 @@
1/*
2 * Copyright (c) 2008 open80211s Ltd.
3 * Author: Luis Carlos Cobo <luisca@cozybit.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <asm/unaligned.h>
11#include "mesh.h"
12
13#define TEST_FRAME_LEN 8192
14#define MAX_METRIC 0xffffffff
15#define ARITH_SHIFT 8
16
17/* Number of frames buffered per destination for unresolved destinations */
18#define MESH_FRAME_QUEUE_LEN 10
19#define MAX_PREQ_QUEUE_LEN 64
20
21/* Destination only */
22#define MP_F_DO 0x1
23/* Reply and forward */
24#define MP_F_RF 0x2
25
26static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
27{
28 if (ae)
29 offset += 6;
30 return le32_to_cpu(get_unaligned((__le32 *) (preq_elem + offset)));
31}
32
/* HWMP IE processing macros.
 * Note: no trailing semicolons in the expansions — the originals carried
 * stray ';' on several u32_field_get() macros, which would break any use
 * inside an expression (they only happened to work as full statements).
 * AE_F_SET's argument is parenthesized for macro hygiene.
 */
#define AE_F			(1<<6)
#define AE_F_SET(x)		(*(x) & AE_F)
#define PREQ_IE_FLAGS(x)	(*(x))
#define PREQ_IE_HOPCOUNT(x)	(*(x + 1))
#define PREQ_IE_TTL(x)		(*(x + 2))
#define PREQ_IE_PREQ_ID(x)	u32_field_get(x, 3, 0)
#define PREQ_IE_ORIG_ADDR(x)	(x + 7)
#define PREQ_IE_ORIG_DSN(x)	u32_field_get(x, 13, 0)
#define PREQ_IE_LIFETIME(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREQ_IE_METRIC(x)	u32_field_get(x, 21, AE_F_SET(x))
#define PREQ_IE_DST_F(x)	(*(AE_F_SET(x) ? x + 32 : x + 26))
#define PREQ_IE_DST_ADDR(x)	(AE_F_SET(x) ? x + 33 : x + 27)
#define PREQ_IE_DST_DSN(x)	u32_field_get(x, 33, AE_F_SET(x))


#define PREP_IE_FLAGS(x)	PREQ_IE_FLAGS(x)
#define PREP_IE_HOPCOUNT(x)	PREQ_IE_HOPCOUNT(x)
#define PREP_IE_TTL(x)		PREQ_IE_TTL(x)
#define PREP_IE_ORIG_ADDR(x)	(x + 3)
#define PREP_IE_ORIG_DSN(x)	u32_field_get(x, 9, 0)
#define PREP_IE_LIFETIME(x)	u32_field_get(x, 13, AE_F_SET(x))
#define PREP_IE_METRIC(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREP_IE_DST_ADDR(x)	(AE_F_SET(x) ? x + 27 : x + 21)
#define PREP_IE_DST_DSN(x)	u32_field_get(x, 27, AE_F_SET(x))

#define PERR_IE_DST_ADDR(x)	(x + 2)
#define PERR_IE_DST_DSN(x)	u32_field_get(x, 8, 0)

/* 802.11 TUs are 1024 us; convert to/from jiffies and ms */
#define TU_TO_EXP_TIME(x) (jiffies + msecs_to_jiffies(x * 1024 / 1000))
#define MSEC_TO_TU(x) (x*1000/1024)
/* Serial-number style comparison, robust against u32 wraparound */
#define DSN_GT(x, y) ((long) (y) - (long) (x) < 0)
#define DSN_LT(x, y) ((long) (x) - (long) (y) < 0)
66
67#define net_traversal_jiffies(s) \
68 msecs_to_jiffies(s->u.sta.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
69#define default_lifetime(s) \
70 MSEC_TO_TU(s->u.sta.mshcfg.dot11MeshHWMPactivePathTimeout)
71#define min_preq_int_jiff(s) \
72 (msecs_to_jiffies(s->u.sta.mshcfg.dot11MeshHWMPpreqMinInterval))
73#define max_preq_retries(s) (s->u.sta.mshcfg.dot11MeshHWMPmaxPREQretries)
74#define disc_timeout_jiff(s) \
75 msecs_to_jiffies(sdata->u.sta.mshcfg.min_discovery_timeout)
76
77enum mpath_frame_type {
78 MPATH_PREQ = 0,
79 MPATH_PREP,
80 MPATH_PERR
81};
82
83static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
84 u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst,
85 __le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime,
86 __le32 metric, __le32 preq_id, struct net_device *dev)
87{
88 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
89 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
90 struct ieee80211_mgmt *mgmt;
91 u8 *pos;
92 int ie_len;
93
94 if (!skb)
95 return -1;
96 skb_reserve(skb, local->hw.extra_tx_headroom);
97 /* 25 is the size of the common mgmt part (24) plus the size of the
98 * common action part (1)
99 */
100 mgmt = (struct ieee80211_mgmt *)
101 skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
102 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
103 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
104 IEEE80211_STYPE_ACTION);
105
106 memcpy(mgmt->da, da, ETH_ALEN);
107 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
108 /* BSSID is left zeroed, wildcard value */
109 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
110 mgmt->u.action.u.mesh_action.action_code = action;
111
112 switch (action) {
113 case MPATH_PREQ:
114 ie_len = 37;
115 pos = skb_put(skb, 2 + ie_len);
116 *pos++ = WLAN_EID_PREQ;
117 break;
118 case MPATH_PREP:
119 ie_len = 31;
120 pos = skb_put(skb, 2 + ie_len);
121 *pos++ = WLAN_EID_PREP;
122 break;
123 default:
124 kfree(skb);
125 return -ENOTSUPP;
126 break;
127 }
128 *pos++ = ie_len;
129 *pos++ = flags;
130 *pos++ = hop_count;
131 *pos++ = ttl;
132 if (action == MPATH_PREQ) {
133 memcpy(pos, &preq_id, 4);
134 pos += 4;
135 }
136 memcpy(pos, orig_addr, ETH_ALEN);
137 pos += ETH_ALEN;
138 memcpy(pos, &orig_dsn, 4);
139 pos += 4;
140 memcpy(pos, &lifetime, 4);
141 pos += 4;
142 memcpy(pos, &metric, 4);
143 pos += 4;
144 if (action == MPATH_PREQ) {
145 /* destination count */
146 *pos++ = 1;
147 *pos++ = dst_flags;
148 }
149 memcpy(pos, dst, ETH_ALEN);
150 pos += ETH_ALEN;
151 memcpy(pos, &dst_dsn, 4);
152
153 ieee80211_sta_tx(dev, skb, 0);
154 return 0;
155}
156
/**
 * mesh_path_error_tx - Sends a PERR mesh management frame
 *
 * @dst: broken destination
 * @dst_dsn: dsn of the broken destination
 * @ra: node this frame is addressed to
 * @dev: local mesh interface
 *
 * Returns: 0 on success, -1 if the skb could not be allocated.
 */
int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
		struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
	struct ieee80211_mgmt *mgmt;
	u8 *pos;
	int ie_len;

	if (!skb)
		return -1;
	skb_reserve(skb, local->hw.extra_tx_headroom);
	/* 25 is the size of the common mgmt part (24) plus the size of the
	 * common action part (1)
	 */
	mgmt = (struct ieee80211_mgmt *)
		skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
	memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
	mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
					   IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, ra, ETH_ALEN);
	memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
	/* BSSID is left zeroed, wildcard value */
	mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
	mgmt->u.action.u.mesh_action.action_code = MPATH_PERR;
	/* PERR IE: 1 (flags) + 1 (ndest) + 6 (addr) + 4 (dsn) = 12 bytes */
	ie_len = 12;
	pos = skb_put(skb, 2 + ie_len);
	*pos++ = WLAN_EID_PERR;
	*pos++ = ie_len;
	/* mode flags, reserved */
	*pos++ = 0;
	/* number of destinations */
	*pos++ = 1;
	memcpy(pos, dst, ETH_ALEN);
	pos += ETH_ALEN;
	memcpy(pos, &dst_dsn, 4);

	ieee80211_sta_tx(dev, skb, 0);
	return 0;
}
205
206static u32 airtime_link_metric_get(struct ieee80211_local *local,
207 struct sta_info *sta)
208{
209 struct ieee80211_supported_band *sband;
210 /* This should be adjusted for each device */
211 int device_constant = 1 << ARITH_SHIFT;
212 int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
213 int s_unit = 1 << ARITH_SHIFT;
214 int rate, err;
215 u32 tx_time, estimated_retx;
216 u64 result;
217
218 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
219
220 if (sta->fail_avg >= 100)
221 return MAX_METRIC;
222 err = (sta->fail_avg << ARITH_SHIFT) / 100;
223
224 /* bitrate is in units of 100 Kbps, while we need rate in units of
225 * 1Mbps. This will be corrected on tx_time computation.
226 */
227 rate = sband->bitrates[sta->txrate_idx].bitrate;
228 tx_time = (device_constant + 10 * test_frame_len / rate);
229 estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
230 result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT) ;
231 return (u32)result;
232}
233
/**
 * hwmp_route_info_get - Update routing info to originator and transmitter
 *
 * @dev: local mesh interface
 * @mgmt: mesh management frame
 * @hwmp_ie: hwmp information element (PREP or PREQ)
 *
 * This function updates the path routing information to the originator and the
 * transmitter of a HWMP PREQ or PREP frame.
 *
 * Returns: metric to frame originator or 0 if the frame should not be further
 * processed
 *
 * Notes: this function is the only place (besides user-provided info) where
 * path routing information is updated.
 */
static u32 hwmp_route_info_get(struct net_device *dev,
			    struct ieee80211_mgmt *mgmt,
			    u8 *hwmp_ie)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct mesh_path *mpath;
	struct sta_info *sta;
	bool fresh_info;
	u8 *orig_addr, *ta;
	u32 orig_dsn, orig_metric;
	unsigned long orig_lifetime, exp_time;
	u32 last_hop_metric, new_metric;
	bool process = true;
	u8 action = mgmt->u.action.u.mesh_action.action_code;

	/* The whole update runs under rcu_read_lock so sta and mpath
	 * pointers stay valid; per-path state is guarded by state_lock.
	 */
	rcu_read_lock();
	sta = sta_info_get(local, mgmt->sa);
	if (!sta) {
		rcu_read_unlock();
		return 0;
	}

	last_hop_metric = airtime_link_metric_get(local, sta);
	/* Update and check originator routing info */
	fresh_info = true;

	switch (action) {
	case MPATH_PREQ:
		orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
		orig_dsn = PREQ_IE_ORIG_DSN(hwmp_ie);
		orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
		orig_metric = PREQ_IE_METRIC(hwmp_ie);
		break;
	case MPATH_PREP:
		/* Originator here refers to the MP that was the destination in
		 * the Path Request. The draft refers to that MP as the
		 * destination address, even though usually it is the origin of
		 * the PREP frame. We divert from the nomenclature in the draft
		 * so that we can easily use a single function to gather path
		 * information from both PREQ and PREP frames.
		 */
		orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie);
		orig_dsn = PREP_IE_ORIG_DSN(hwmp_ie);
		orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
		orig_metric = PREP_IE_METRIC(hwmp_ie);
		break;
	default:
		rcu_read_unlock();
		return 0;
	}
	/* Saturate instead of wrapping if the accumulated metric overflows */
	new_metric = orig_metric + last_hop_metric;
	if (new_metric < orig_metric)
		new_metric = MAX_METRIC;
	exp_time = TU_TO_EXP_TIME(orig_lifetime);

	if (memcmp(orig_addr, dev->dev_addr, ETH_ALEN) == 0) {
		/* This MP is the originator, we are not interested in this
		 * frame, except for updating transmitter's path info.
		 */
		process = false;
		fresh_info = false;
	} else {
		mpath = mesh_path_lookup(orig_addr, dev);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if (mpath->flags & MESH_PATH_FIXED)
				fresh_info = false;
			else if ((mpath->flags & MESH_PATH_ACTIVE) &&
			    (mpath->flags & MESH_PATH_DSN_VALID)) {
				/* Keep existing info when it is newer (higher
				 * dsn) or equally new with a better metric.
				 */
				if (DSN_GT(mpath->dsn, orig_dsn) ||
				    (mpath->dsn == orig_dsn &&
				     action == MPATH_PREQ &&
				     new_metric > mpath->metric)) {
					process = false;
					fresh_info = false;
				}
			}
		} else {
			mesh_path_add(orig_addr, dev);
			mpath = mesh_path_lookup(orig_addr, dev);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			mpath->flags |= MESH_PATH_DSN_VALID;
			mpath->metric = new_metric;
			mpath->dsn = orig_dsn;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ?  mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
			/* draft says preq_id should be saved to, but there does
			 * not seem to be any use for it, skipping by now
			 */
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	/* Update and check transmitter routing info */
	ta = mgmt->sa;
	if (memcmp(orig_addr, ta, ETH_ALEN) == 0)
		fresh_info = false;
	else {
		fresh_info = true;

		mpath = mesh_path_lookup(ta, dev);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if ((mpath->flags & MESH_PATH_FIXED) ||
				((mpath->flags & MESH_PATH_ACTIVE) &&
					(last_hop_metric > mpath->metric)))
				fresh_info = false;
		} else {
			mesh_path_add(ta, dev);
			mpath = mesh_path_lookup(ta, dev);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			/* one-hop path: no end-to-end dsn is known for ta */
			mpath->flags &= ~MESH_PATH_DSN_VALID;
			mpath->metric = last_hop_metric;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ?  mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	rcu_read_unlock();

	return process ? new_metric : 0;
}
395
396static void hwmp_preq_frame_process(struct net_device *dev,
397 struct ieee80211_mgmt *mgmt,
398 u8 *preq_elem, u32 metric) {
399 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
400 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
401 struct mesh_path *mpath;
402 u8 *dst_addr, *orig_addr;
403 u8 dst_flags, ttl;
404 u32 orig_dsn, dst_dsn, lifetime;
405 bool reply = false;
406 bool forward = true;
407
408 /* Update destination DSN, if present */
409 dst_addr = PREQ_IE_DST_ADDR(preq_elem);
410 orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
411 dst_dsn = PREQ_IE_DST_DSN(preq_elem);
412 orig_dsn = PREQ_IE_ORIG_DSN(preq_elem);
413 dst_flags = PREQ_IE_DST_F(preq_elem);
414
415 if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0) {
416 forward = false;
417 reply = true;
418 metric = 0;
419 if (time_after(jiffies, ifsta->last_dsn_update +
420 net_traversal_jiffies(sdata)) ||
421 time_before(jiffies, ifsta->last_dsn_update)) {
422 dst_dsn = ++ifsta->dsn;
423 ifsta->last_dsn_update = jiffies;
424 }
425 } else {
426 rcu_read_lock();
427 mpath = mesh_path_lookup(dst_addr, dev);
428 if (mpath) {
429 if ((!(mpath->flags & MESH_PATH_DSN_VALID)) ||
430 DSN_LT(mpath->dsn, dst_dsn)) {
431 mpath->dsn = dst_dsn;
432 mpath->flags &= MESH_PATH_DSN_VALID;
433 } else if ((!(dst_flags & MP_F_DO)) &&
434 (mpath->flags & MESH_PATH_ACTIVE)) {
435 reply = true;
436 metric = mpath->metric;
437 dst_dsn = mpath->dsn;
438 if (dst_flags & MP_F_RF)
439 dst_flags |= MP_F_DO;
440 else
441 forward = false;
442 }
443 }
444 rcu_read_unlock();
445 }
446
447 if (reply) {
448 lifetime = PREQ_IE_LIFETIME(preq_elem);
449 ttl = ifsta->mshcfg.dot11MeshTTL;
450 if (ttl != 0)
451 mesh_path_sel_frame_tx(MPATH_PREP, 0, dst_addr,
452 cpu_to_le32(dst_dsn), 0, orig_addr,
453 cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl,
454 cpu_to_le32(lifetime), cpu_to_le32(metric),
455 0, dev);
456 else
457 ifsta->mshstats.dropped_frames_ttl++;
458 }
459
460 if (forward) {
461 u32 preq_id;
462 u8 hopcount, flags;
463
464 ttl = PREQ_IE_TTL(preq_elem);
465 lifetime = PREQ_IE_LIFETIME(preq_elem);
466 if (ttl <= 1) {
467 ifsta->mshstats.dropped_frames_ttl++;
468 return;
469 }
470 --ttl;
471 flags = PREQ_IE_FLAGS(preq_elem);
472 preq_id = PREQ_IE_PREQ_ID(preq_elem);
473 hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
474 mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
475 cpu_to_le32(orig_dsn), dst_flags, dst_addr,
476 cpu_to_le32(dst_dsn), dev->broadcast,
477 hopcount, ttl, cpu_to_le32(lifetime),
478 cpu_to_le32(metric), cpu_to_le32(preq_id),
479 dev);
480 ifsta->mshstats.fwded_frames++;
481 }
482}
483
484
485static void hwmp_prep_frame_process(struct net_device *dev,
486 struct ieee80211_mgmt *mgmt,
487 u8 *prep_elem, u32 metric)
488{
489 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
490 struct mesh_path *mpath;
491 u8 *dst_addr, *orig_addr;
492 u8 ttl, hopcount, flags;
493 u8 next_hop[ETH_ALEN];
494 u32 dst_dsn, orig_dsn, lifetime;
495
496 /* Note that we divert from the draft nomenclature and denominate
497 * destination to what the draft refers to as origininator. So in this
498 * function destnation refers to the final destination of the PREP,
499 * which corresponds with the originator of the PREQ which this PREP
500 * replies
501 */
502 dst_addr = PREP_IE_DST_ADDR(prep_elem);
503 if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0)
504 /* destination, no forwarding required */
505 return;
506
507 ttl = PREP_IE_TTL(prep_elem);
508 if (ttl <= 1) {
509 sdata->u.sta.mshstats.dropped_frames_ttl++;
510 return;
511 }
512
513 rcu_read_lock();
514 mpath = mesh_path_lookup(dst_addr, dev);
515 if (mpath)
516 spin_lock_bh(&mpath->state_lock);
517 else
518 goto fail;
519 if (!(mpath->flags & MESH_PATH_ACTIVE)) {
520 spin_unlock_bh(&mpath->state_lock);
521 goto fail;
522 }
523 memcpy(next_hop, mpath->next_hop->addr, ETH_ALEN);
524 spin_unlock_bh(&mpath->state_lock);
525 --ttl;
526 flags = PREP_IE_FLAGS(prep_elem);
527 lifetime = PREP_IE_LIFETIME(prep_elem);
528 hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
529 orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
530 dst_dsn = PREP_IE_DST_DSN(prep_elem);
531 orig_dsn = PREP_IE_ORIG_DSN(prep_elem);
532
533 mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
534 cpu_to_le32(orig_dsn), 0, dst_addr,
535 cpu_to_le32(dst_dsn), mpath->next_hop->addr, hopcount, ttl,
536 cpu_to_le32(lifetime), cpu_to_le32(metric),
537 0, dev);
538 rcu_read_unlock();
539 sdata->u.sta.mshstats.fwded_frames++;
540 return;
541
542fail:
543 rcu_read_unlock();
544 sdata->u.sta.mshstats.dropped_frames_no_route++;
545 return;
546}
547
/**
 * hwmp_perr_frame_process - process a received HWMP PERR element
 *
 * @dev: local mesh interface
 * @mgmt: management frame carrying the PERR
 * @perr_elem: the PERR information element
 *
 * Deactivates the local path to the broken destination when the PERR comes
 * from our current next hop and carries a fresher (or first valid) dsn, and
 * propagates the error to our own neighbors via broadcast.
 */
static void hwmp_perr_frame_process(struct net_device *dev,
			     struct ieee80211_mgmt *mgmt, u8 *perr_elem)
{
	struct mesh_path *mpath;
	u8 *ta, *dst_addr;
	u32 dst_dsn;

	ta = mgmt->sa;
	dst_addr = PERR_IE_DST_ADDR(perr_elem);
	dst_dsn = PERR_IE_DST_DSN(perr_elem);
	rcu_read_lock();
	mpath = mesh_path_lookup(dst_addr, dev);
	if (mpath) {
		spin_lock_bh(&mpath->state_lock);
		/* Only honour the PERR if it was sent by our next hop for
		 * this destination and carries new sequence information.
		 */
		if (mpath->flags & MESH_PATH_ACTIVE &&
		    memcmp(ta, mpath->next_hop->addr, ETH_ALEN) == 0 &&
		    (!(mpath->flags & MESH_PATH_DSN_VALID) ||
		    DSN_GT(dst_dsn, mpath->dsn))) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			mpath->dsn = dst_dsn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn),
					   dev->broadcast, dev);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}
576
577
578
579void mesh_rx_path_sel_frame(struct net_device *dev,
580 struct ieee80211_mgmt *mgmt,
581 size_t len)
582{
583 struct ieee802_11_elems elems;
584 size_t baselen;
585 u32 last_hop_metric;
586
587 baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
588 ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
589 len - baselen, &elems);
590
591 switch (mgmt->u.action.u.mesh_action.action_code) {
592 case MPATH_PREQ:
593 if (!elems.preq || elems.preq_len != 37)
594 /* Right now we support just 1 destination and no AE */
595 return;
596 last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.preq);
597 if (!last_hop_metric)
598 return;
599 hwmp_preq_frame_process(dev, mgmt, elems.preq, last_hop_metric);
600 break;
601 case MPATH_PREP:
602 if (!elems.prep || elems.prep_len != 31)
603 /* Right now we support no AE */
604 return;
605 last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.prep);
606 if (!last_hop_metric)
607 return;
608 hwmp_prep_frame_process(dev, mgmt, elems.prep, last_hop_metric);
609 break;
610 case MPATH_PERR:
611 if (!elems.perr || elems.perr_len != 12)
612 /* Right now we support only one destination per PERR */
613 return;
614 hwmp_perr_frame_process(dev, mgmt, elems.perr);
615 default:
616 return;
617 }
618
619}
620
/**
 * mesh_queue_preq - queue a PREQ to a given destination
 *
 * @mpath: mesh path to discover
 * @flags: special attributes of the PREQ to be sent
 *
 * Locking: the function must be called from within a rcu read lock block.
 *
 */
static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
{
	struct ieee80211_sub_if_data *sdata =
		IEEE80211_DEV_TO_SUB_IF(mpath->dev);
	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
	struct mesh_preq_queue *preq_node;

	preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_KERNEL);
	if (!preq_node) {
		printk(KERN_DEBUG "Mesh HWMP: could not allocate PREQ node\n");
		return;
	}

	/* mesh_preq_queue_lock serializes queue access against the work
	 * item that drains it (mesh_path_start_discovery).
	 */
	spin_lock(&ifsta->mesh_preq_queue_lock);
	if (ifsta->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
		spin_unlock(&ifsta->mesh_preq_queue_lock);
		kfree(preq_node);
		if (printk_ratelimit())
			printk(KERN_DEBUG "Mesh HWMP: PREQ node queue full\n");
		return;
	}

	memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
	preq_node->flags = flags;

	list_add_tail(&preq_node->list, &ifsta->preq_queue.list);
	++ifsta->preq_queue_len;
	spin_unlock(&ifsta->mesh_preq_queue_lock);

	/* Kick the work item now if the rate limit allows, otherwise arm
	 * the timer to fire when the minimum PREQ interval has elapsed.
	 */
	if (time_after(jiffies, ifsta->last_preq + min_preq_int_jiff(sdata)))
		queue_work(sdata->local->hw.workqueue, &ifsta->work);

	else if (time_before(jiffies, ifsta->last_preq)) {
		/* avoid long wait if did not send preqs for a long time
		 * and jiffies wrapped around
		 */
		ifsta->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
		queue_work(sdata->local->hw.workqueue, &ifsta->work);
	} else
		mod_timer(&ifsta->mesh_path_timer, ifsta->last_preq +
						min_preq_int_jiff(sdata));
}
672
/**
 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
 *
 * @dev: local mesh interface
 *
 * Dequeues one PREQ node (if the minimum PREQ interval has elapsed), updates
 * the path's discovery state and transmits a broadcast PREQ for it.
 */
void mesh_path_start_discovery(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata =
		IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
	struct mesh_preq_queue *preq_node;
	struct mesh_path *mpath;
	u8 ttl, dst_flags;
	u32 lifetime;

	spin_lock(&ifsta->mesh_preq_queue_lock);
	if (!ifsta->preq_queue_len ||
		time_before(jiffies, ifsta->last_preq +
				min_preq_int_jiff(sdata))) {
		spin_unlock(&ifsta->mesh_preq_queue_lock);
		return;
	}

	preq_node = list_first_entry(&ifsta->preq_queue.list,
			struct mesh_preq_queue, list);
	list_del(&preq_node->list);
	--ifsta->preq_queue_len;
	spin_unlock(&ifsta->mesh_preq_queue_lock);

	rcu_read_lock();
	mpath = mesh_path_lookup(preq_node->dst, dev);
	if (!mpath)
		goto enddiscovery;

	spin_lock_bh(&mpath->state_lock);
	if (preq_node->flags & PREQ_Q_F_START) {
		/* First PREQ for this path: start (or join) discovery */
		if (mpath->flags & MESH_PATH_RESOLVING) {
			spin_unlock_bh(&mpath->state_lock);
			goto enddiscovery;
		} else {
			mpath->flags &= ~MESH_PATH_RESOLVED;
			mpath->flags |= MESH_PATH_RESOLVING;
			mpath->discovery_retries = 0;
			mpath->discovery_timeout = disc_timeout_jiff(sdata);
		}
	} else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
			mpath->flags & MESH_PATH_RESOLVED) {
		/* Retry PREQ for a discovery that already ended */
		mpath->flags &= ~MESH_PATH_RESOLVING;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	ifsta->last_preq = jiffies;

	/* Bump our own dsn if enough time has passed (or jiffies wrapped) */
	if (time_after(jiffies, ifsta->last_dsn_update +
				net_traversal_jiffies(sdata)) ||
	    time_before(jiffies, ifsta->last_dsn_update)) {
		++ifsta->dsn;
		sdata->u.sta.last_dsn_update = jiffies;
	}
	lifetime = default_lifetime(sdata);
	ttl = sdata->u.sta.mshcfg.dot11MeshTTL;
	if (ttl == 0) {
		sdata->u.sta.mshstats.dropped_frames_ttl++;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	if (preq_node->flags & PREQ_Q_F_REFRESH)
		dst_flags = MP_F_DO;
	else
		dst_flags = MP_F_RF;

	spin_unlock_bh(&mpath->state_lock);
	mesh_path_sel_frame_tx(MPATH_PREQ, 0, dev->dev_addr,
			cpu_to_le32(ifsta->dsn), dst_flags, mpath->dst,
			cpu_to_le32(mpath->dsn), dev->broadcast, 0,
			ttl, cpu_to_le32(lifetime), 0,
			cpu_to_le32(ifsta->preq_id++), dev);
	mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);

enddiscovery:
	rcu_read_unlock();
	kfree(preq_node);
}
758
/**
 * ieee80211s_lookup_nexthop - put the appropriate next hop on a mesh frame
 *
 * @next_hop: output argument for next hop address
 * @skb: frame to be sent
 * @dev: network device the frame will be sent through
 *
 * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is
 * found, the function will start a path discovery and queue the frame so it is
 * sent when the path is resolved. This means the caller must not free the skb
 * in this case.
 */
int mesh_nexthop_lookup(u8 *next_hop, struct sk_buff *skb,
		struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct sk_buff *skb_to_free = NULL;
	struct mesh_path *mpath;
	int err = 0;

	rcu_read_lock();
	/* Destination address is at the start of skb->data here */
	mpath = mesh_path_lookup(skb->data, dev);

	if (!mpath) {
		/* No path entry yet: create one, then re-look it up */
		mesh_path_add(skb->data, dev);
		mpath = mesh_path_lookup(skb->data, dev);
		if (!mpath) {
			/* Table full (or add failed): drop the frame */
			dev_kfree_skb(skb);
			sdata->u.sta.mshstats.dropped_frames_no_route++;
			err = -ENOSPC;
			goto endlookup;
		}
	}

	if (mpath->flags & MESH_PATH_ACTIVE) {
		/* Path usable; proactively refresh it when it is about to
		 * expire, but only for locally originated frames and paths
		 * that are neither fixed nor already being resolved */
		if (time_after(jiffies, mpath->exp_time -
			msecs_to_jiffies(sdata->u.sta.mshcfg.path_refresh_time))
				&& skb->pkt_type != PACKET_OTHERHOST
				&& !(mpath->flags & MESH_PATH_RESOLVING)
				&& !(mpath->flags & MESH_PATH_FIXED)) {
			mesh_queue_preq(mpath,
					PREQ_Q_F_START | PREQ_Q_F_REFRESH);
		}
		memcpy(next_hop, mpath->next_hop->addr,
				ETH_ALEN);
	} else {
		if (!(mpath->flags & MESH_PATH_RESOLVING)) {
			/* Start discovery only if it is not running yet */
			mesh_queue_preq(mpath, PREQ_Q_F_START);
		}

		/* Park the frame until the path resolves; if the per-path
		 * queue is full, discard the oldest queued frame first */
		if (skb_queue_len(&mpath->frame_queue) >=
				MESH_FRAME_QUEUE_LEN) {
			skb_to_free = mpath->frame_queue.next;
			skb_unlink(skb_to_free, &mpath->frame_queue);
		}

		skb_queue_tail(&mpath->frame_queue, skb);
		if (skb_to_free)
			mesh_path_discard_frame(skb_to_free, dev);
		/* Frame was queued, not sent: tell the caller via -ENOENT */
		err = -ENOENT;
	}

endlookup:
	rcu_read_unlock();
	return err;
}
826
/* Per-path discovery timer: retries the PREQ with exponential backoff or
 * gives up on the path when the retry budget is exhausted.
 */
void mesh_path_timer(unsigned long data)
{
	struct ieee80211_sub_if_data *sdata;
	struct mesh_path *mpath;

	rcu_read_lock();
	mpath = (struct mesh_path *) data;
	/* NOTE(review): rcu_dereference() on a plain local pointer looks like
	 * a no-op here; presumably intended as an RCU annotation on the
	 * timer-supplied pointer — confirm against the RCU API rules. */
	mpath = rcu_dereference(mpath);
	if (!mpath)
		goto endmpathtimer;
	spin_lock_bh(&mpath->state_lock);
	sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev);
	if (mpath->flags & MESH_PATH_RESOLVED ||
			(!(mpath->flags & MESH_PATH_RESOLVING)))
		/* Discovery finished (or was never running): clear state */
		mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
	else if (mpath->discovery_retries < max_preq_retries(sdata)) {
		/* Retry with doubled timeout (exponential backoff) */
		++mpath->discovery_retries;
		mpath->discovery_timeout *= 2;
		mesh_queue_preq(mpath, 0);
	} else {
		/* Out of retries: mark path expired and drop pending frames */
		mpath->flags = 0;
		mpath->exp_time = jiffies;
		mesh_path_flush_pending(mpath);
	}

	spin_unlock_bh(&mpath->state_lock);
endmpathtimer:
	rcu_read_unlock();
}
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
new file mode 100644
index 000000000000..5845dc21ce85
--- /dev/null
+++ b/net/mac80211/mesh_pathtbl.c
@@ -0,0 +1,516 @@
1/*
2 * Copyright (c) 2008 open80211s Ltd.
3 * Author: Luis Carlos Cobo <luisca@cozybit.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/etherdevice.h>
11#include <linux/list.h>
12#include <linux/netdevice.h>
13#include <linux/random.h>
14#include <linux/spinlock.h>
15#include <linux/string.h>
16#include <net/mac80211.h>
17#include "ieee80211_i.h"
18#include "mesh.h"
19
20/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
21#define INIT_PATHS_SIZE_ORDER 2
22
23/* Keep the mean chain length below this constant */
24#define MEAN_CHAIN_LEN 2
25
26#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
27 time_after(jiffies, mpath->exp_time) && \
28 !(mpath->flags & MESH_PATH_FIXED))
29
/* Hash bucket entry wrapping a mesh_path pointer. */
struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

/* The global mesh path table; replaced wholesale (RCU) when it grows. */
static struct mesh_table *mesh_paths;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. When reading the table (i.e. doing lookups) we are well protected
 * by RCU
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);
46
/**
 *
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Uses rcu_assign_pointer() so concurrent RCU readers see either the old
 * or the new next hop, never a torn pointer.
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	rcu_assign_pointer(mpath->next_hop, sta);
}
60
61
/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @dev: local interface
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	/* The table pointer itself is RCU-protected (swapped on resize) */
	tbl = rcu_dereference(mesh_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, dev, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->dev == dev &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				/* Re-check under the lock before clearing
				 * ACTIVE, to avoid racing another clearer */
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
97
/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @dev: local interface, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Used by iterators (e.g. cfg80211 dumps); walks the whole table counting
 * entries that match @dev until the @idx-th one is reached.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev)
{
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (dev && node->mpath->dev != dev)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				/* Same double-checked expiry handling as
				 * mesh_path_lookup() */
				spin_lock_bh(&node->mpath->state_lock);
				if (MPATH_EXPIRED(node->mpath))
					node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}
130
131/**
132 * mesh_path_add - allocate and add a new path to the mesh path table
133 * @addr: destination address of the path (ETH_ALEN length)
134 * @dev: local interface
135 *
136 * Returns: 0 on sucess
137 *
138 * State: the initial state of the new path is set to 0
139 */
140int mesh_path_add(u8 *dst, struct net_device *dev)
141{
142 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
143 struct mesh_path *mpath, *new_mpath;
144 struct mpath_node *node, *new_node;
145 struct hlist_head *bucket;
146 struct hlist_node *n;
147 int grow = 0;
148 int err = 0;
149 u32 hash_idx;
150
151 if (memcmp(dst, dev->dev_addr, ETH_ALEN) == 0)
152 /* never add ourselves as neighbours */
153 return -ENOTSUPP;
154
155 if (is_multicast_ether_addr(dst))
156 return -ENOTSUPP;
157
158 if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0)
159 return -ENOSPC;
160
161 read_lock(&pathtbl_resize_lock);
162
163 new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
164 if (!new_mpath) {
165 atomic_dec(&sdata->u.sta.mpaths);
166 err = -ENOMEM;
167 goto endadd2;
168 }
169 memcpy(new_mpath->dst, dst, ETH_ALEN);
170 new_mpath->dev = dev;
171 new_mpath->flags = 0;
172 skb_queue_head_init(&new_mpath->frame_queue);
173 new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
174 new_node->mpath = new_mpath;
175 new_mpath->timer.data = (unsigned long) new_mpath;
176 new_mpath->timer.function = mesh_path_timer;
177 new_mpath->exp_time = jiffies;
178 spin_lock_init(&new_mpath->state_lock);
179 init_timer(&new_mpath->timer);
180
181 hash_idx = mesh_table_hash(dst, dev, mesh_paths);
182 bucket = &mesh_paths->hash_buckets[hash_idx];
183
184 spin_lock(&mesh_paths->hashwlock[hash_idx]);
185
186 hlist_for_each_entry(node, n, bucket, list) {
187 mpath = node->mpath;
188 if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN)
189 == 0) {
190 err = -EEXIST;
191 atomic_dec(&sdata->u.sta.mpaths);
192 kfree(new_node);
193 kfree(new_mpath);
194 goto endadd;
195 }
196 }
197
198 hlist_add_head_rcu(&new_node->list, bucket);
199 if (atomic_inc_return(&mesh_paths->entries) >=
200 mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
201 grow = 1;
202
203endadd:
204 spin_unlock(&mesh_paths->hashwlock[hash_idx]);
205endadd2:
206 read_unlock(&pathtbl_resize_lock);
207 if (!err && grow) {
208 struct mesh_table *oldtbl, *newtbl;
209
210 write_lock(&pathtbl_resize_lock);
211 oldtbl = mesh_paths;
212 newtbl = mesh_table_grow(mesh_paths);
213 if (!newtbl) {
214 write_unlock(&pathtbl_resize_lock);
215 return -ENOMEM;
216 }
217 rcu_assign_pointer(mesh_paths, newtbl);
218 synchronize_rcu();
219 mesh_table_free(oldtbl, false);
220 write_unlock(&pathtbl_resize_lock);
221 }
222 return err;
223}
224
225
/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct net_device *dev = sta->sdata->dev;
	int i;

	rcu_read_lock();
	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		/* Deactivate every active, non-fixed path routed through the
		 * broken peer and advertise the failure with a PERR */
		if (mpath->next_hop == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->dsn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(mpath->dst,
					cpu_to_le32(mpath->dsn),
					dev->broadcast, dev);
		} else
		spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}
260EXPORT_SYMBOL(mesh_plink_broken);
261
/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta - mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	/* Delete every path whose next hop is the departing peer */
	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->next_hop == sta)
			mesh_path_del(mpath->dst, mpath->dev);
	}
}
286
/* Remove every mesh path that belongs to @dev (e.g. on interface teardown). */
void mesh_path_flush(struct net_device *dev)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->dev == dev)
			mesh_path_del(mpath->dst, mpath->dev);
	}
}
300
/* RCU callback: free a path node after a grace period (see mesh_path_del).
 * Stops the discovery timer, releases the per-interface path count and
 * frees both the mesh_path and its table node.
 */
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata =
		IEEE80211_DEV_TO_SUB_IF(node->mpath->dev);

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.sta.mpaths);
	kfree(node->mpath);
	kfree(node);
}
312
/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @dev: local interface
 *
 * Returns: 0 if successful, -ENXIO if the path was not found
 */
int mesh_path_del(u8 *addr, struct net_device *dev)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock(&pathtbl_resize_lock);
	hash_idx = mesh_table_hash(addr, dev, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->dev == dev &&
				memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			spin_lock_bh(&mpath->state_lock);
			/* Mark RESOLVING so concurrent users stop treating
			 * the path as usable while it is being torn down */
			mpath->flags |= MESH_PATH_RESOLVING;
			hlist_del_rcu(&node->list);
			/* Actual freeing deferred past the RCU grace period */
			call_rcu(&node->rcu, mesh_path_node_reclaim);
			atomic_dec(&mesh_paths->entries);
			spin_unlock_bh(&mpath->state_lock);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	return err;
}
355
/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	/* NOTE(review): if the path goes inactive mid-loop, the skb already
	 * dequeued by the condition appears to be neither sent nor freed —
	 * verify this cannot leak. */
	while ((skb = skb_dequeue(&mpath->frame_queue)) &&
			(mpath->flags & MESH_PATH_ACTIVE))
		dev_queue_xmit(skb);
}
372
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @dev: network device the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor.
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct mesh_path *mpath;
	u32 dsn = 0;

	/* PACKET_OTHERHOST marks frames we were forwarding for another MP:
	 * notify the precursor with a PERR before dropping */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		struct ieee80211s_hdr *prev_meshhdr;
		int mshhdrlen;
		u8 *ra, *da;

		/* The previous hop's mesh header was stashed in skb->cb */
		prev_meshhdr = ((struct ieee80211s_hdr *)skb->cb);
		mshhdrlen = ieee80211_get_mesh_hdrlen(prev_meshhdr);
		da = skb->data;
		ra = MESH_PREQ(skb);
		mpath = mesh_path_lookup(da, dev);
		if (mpath)
			dsn = ++mpath->dsn;
		mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, dev);
	}

	kfree_skb(skb);
	sdata->u.sta.mshstats.dropped_frames_no_route++;
}
408
/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata;
	struct sk_buff *skb;

	sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev);

	/* Discard queued frames while the path is inactive; the ACTIVE check
	 * stops the flush if the path becomes usable again */
	while ((skb = skb_dequeue(&mpath->frame_queue)) &&
			(mpath->flags & MESH_PATH_ACTIVE))
		mesh_path_discard_frame(skb, mpath->dev);
}
427
/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * A fixed path is excluded from expiry and automatic refresh
 * (MESH_PATH_FIXED). Any frames queued on the path are transmitted once
 * the path is activated.
 *
 * Locking: this function must be called holding mpath->state_lock
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->dsn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	/* Must be called without state_lock held */
	mesh_path_tx_pending(mpath);
}
449
/* Table callback: unlink and free one path node. @free_leafs selects whether
 * the shared mesh_path itself is freed too (false during table resize, when
 * the new table still references it).
 */
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	hlist_del_rcu(p);
	/* Wait for concurrent RCU readers before freeing */
	synchronize_rcu();
	if (free_leafs)
		kfree(mpath);
	kfree(node);
}
461
462static void mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
463{
464 struct mesh_path *mpath;
465 struct mpath_node *node, *new_node;
466 u32 hash_idx;
467
468 node = hlist_entry(p, struct mpath_node, list);
469 mpath = node->mpath;
470 new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
471 new_node->mpath = mpath;
472 hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl);
473 hlist_add_head(&new_node->list,
474 &newtbl->hash_buckets[hash_idx]);
475}
476
477int mesh_pathtbl_init(void)
478{
479 mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
480 mesh_paths->free_node = &mesh_path_node_free;
481 mesh_paths->copy_node = &mesh_path_node_copy;
482 mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;
483 if (!mesh_paths)
484 return -ENOMEM;
485 return 0;
486}
487
/* Periodic housekeeping for @dev: delete paths that are neither fixed nor
 * being resolved and whose expiry time (plus grace period) has passed.
 */
void mesh_path_expire(struct net_device *dev)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	read_lock(&pathtbl_resize_lock);
	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (node->mpath->dev != dev)
			continue;
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
			time_after(jiffies,
			 mpath->exp_time + MESH_PATH_EXPIRE)) {
			/* Drop state_lock before deletion; mesh_path_del
			 * takes it again itself */
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_del(mpath->dst, mpath->dev);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	read_unlock(&pathtbl_resize_lock);
}
512
/* Tear down the global path table, freeing all paths (free_leafs = true). */
void mesh_pathtbl_unregister(void)
{
	mesh_table_free(mesh_paths, true);
}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
new file mode 100644
index 000000000000..37f0c2b94ae7
--- /dev/null
+++ b/net/mac80211/mesh_plink.c
@@ -0,0 +1,762 @@
1/*
2 * Copyright (c) 2008 open80211s Ltd.
3 * Author: Luis Carlos Cobo <luisca@cozybit.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <linux/kernel.h>
10#include <linux/random.h>
11#include "ieee80211_i.h"
12#include "rate.h"
13#include "mesh.h"
14
15#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
16#define mpl_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
17#else
18#define mpl_dbg(fmt, args...) do { (void)(0); } while (0)
19#endif
20
21#define PLINK_GET_FRAME_SUBTYPE(p) (p)
22#define PLINK_GET_LLID(p) (p + 1)
23#define PLINK_GET_PLID(p) (p + 3)
24
25#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \
26 jiffies + HZ * t / 1000))
27
28/* Peer link cancel reasons, all subject to ANA approval */
29#define MESH_LINK_CANCELLED 2
30#define MESH_MAX_NEIGHBORS 3
31#define MESH_CAPABILITY_POLICY_VIOLATION 4
32#define MESH_CLOSE_RCVD 5
33#define MESH_MAX_RETRIES 6
34#define MESH_CONFIRM_TIMEOUT 7
35#define MESH_SECURITY_ROLE_NEGOTIATION_DIFFERS 8
36#define MESH_SECURITY_AUTHENTICATION_IMPOSSIBLE 9
37#define MESH_SECURITY_FAILED_VERIFICATION 10
38
39#define dot11MeshMaxRetries(s) (s->u.sta.mshcfg.dot11MeshMaxRetries)
40#define dot11MeshRetryTimeout(s) (s->u.sta.mshcfg.dot11MeshRetryTimeout)
41#define dot11MeshConfirmTimeout(s) (s->u.sta.mshcfg.dot11MeshConfirmTimeout)
42#define dot11MeshHoldingTimeout(s) (s->u.sta.mshcfg.dot11MeshHoldingTimeout)
43#define dot11MeshMaxPeerLinks(s) (s->u.sta.mshcfg.dot11MeshMaxPeerLinks)
44
/* Peer link management action frame subtypes. */
enum plink_frame_type {
	PLINK_OPEN = 0,
	PLINK_CONFIRM,
	PLINK_CLOSE
};

/* Events driving the peer link FSM: a received frame is classified as
 * accepted (ACPT), rejected (RJCT) or ignored (IGNR) per frame type.
 */
enum plink_event {
	PLINK_UNDEFINED,
	OPN_ACPT,
	OPN_RJCT,
	OPN_IGNR,
	CNF_ACPT,
	CNF_RJCT,
	CNF_IGNR,
	CLS_ACPT,
	CLS_IGNR
};
62
/* Account a newly established peer link and refresh the interface's
 * "accepting plinks" advertisement. */
static inline
void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
{
	atomic_inc(&sdata->u.sta.mshstats.estab_plinks);
	mesh_accept_plinks_update(sdata);
}
69
/* Account a torn-down peer link and refresh the interface's
 * "accepting plinks" advertisement. */
static inline
void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
{
	atomic_dec(&sdata->u.sta.mshstats.estab_plinks);
	mesh_accept_plinks_update(sdata);
}
76
/**
 * mesh_plink_fsm_restart - restart a mesh peer link finite state machine
 *
 * @sta: mesh peer link to restart
 *
 * Returns the FSM to PLINK_LISTEN and clears all negotiation state
 * (link IDs, close reason, retry counter).
 *
 * Locking: this function must be called holding sta->plink_lock
 */
static inline void mesh_plink_fsm_restart(struct sta_info *sta)
{
	sta->plink_state = PLINK_LISTEN;
	sta->llid = sta->plid = sta->reason = 0;
	sta->plink_retries = 0;
}
90
/*
 * NOTE: This is just an alias for sta_info_alloc(), see notes
 * on it in the lifecycle management section!
 *
 * Allocates a station entry for a prospective mesh peer, pre-authorized
 * and with the given supported rates. Returns NULL when the mesh peer
 * link limit is reached or allocation fails.
 */
static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
					 u8 *hw_addr, u64 rates)
{
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;

	if (local->num_sta >= MESH_MAX_PLINKS)
		return NULL;

	/* GFP_ATOMIC: may be called from RX path / non-sleepable context */
	sta = sta_info_alloc(sdata, hw_addr, GFP_ATOMIC);
	if (!sta)
		return NULL;

	sta->flags |= WLAN_STA_AUTHORIZED;
	sta->supp_rates[local->hw.conf.channel->band] = rates;

	return sta;
}
113
/**
 * __mesh_plink_deactivate - deactivate mesh peer link
 *
 * @sta: mesh peer link to deactivate
 *
 * All mesh paths with this peer as next hop will be flushed
 *
 * Locking: the caller must hold sta->plink_lock
 */
static void __mesh_plink_deactivate(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;

	if (sta->plink_state == PLINK_ESTAB)
		mesh_plink_dec_estab_count(sdata);
	sta->plink_state = PLINK_BLOCKED;
	mesh_path_flush_by_nexthop(sta);
}
132
/**
 * mesh_plink_deactivate - deactivate mesh peer link
 *
 * @sta: mesh peer link to deactivate
 *
 * All mesh paths with this peer as next hop will be flushed
 *
 * Locked wrapper around __mesh_plink_deactivate().
 */
void mesh_plink_deactivate(struct sta_info *sta)
{
	spin_lock_bh(&sta->plink_lock);
	__mesh_plink_deactivate(sta);
	spin_unlock_bh(&sta->plink_lock);
}
146
/* Build and transmit a peer link management action frame (open, confirm or
 * close) to @da, carrying our local link ID @llid and, where required, the
 * peer's link ID @plid and a close @reason. Returns 0 on success, -1 if the
 * skb could not be allocated.
 */
static int mesh_plink_frame_tx(struct net_device *dev,
		enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid,
		__le16 reason) {
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
	struct ieee80211_mgmt *mgmt;
	bool include_plid = false;
	u8 *pos;
	int ie_len;

	if (!skb)
		return -1;
	skb_reserve(skb, local->hw.extra_tx_headroom);
	/* 25 is the size of the common mgmt part (24) plus the size of the
	 * common action part (1)
	 */
	mgmt = (struct ieee80211_mgmt *)
		skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action));
	memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action));
	mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
			IEEE80211_STYPE_ACTION);
	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
	/* BSSID is left zeroed, wildcard value */
	mgmt->u.action.category = PLINK_CATEGORY;
	mgmt->u.action.u.plink_action.action_code = action;

	if (action == PLINK_CLOSE)
		mgmt->u.action.u.plink_action.aux = reason;
	else {
		mgmt->u.action.u.plink_action.aux = cpu_to_le16(0x0);
		if (action == PLINK_CONFIRM) {
			pos = skb_put(skb, 4);
			/* two-byte status code followed by two-byte AID */
			memset(pos, 0, 4);
		}
		/* Open/confirm frames also carry the mesh IEs */
		mesh_mgmt_ies_add(skb, dev);
	}

	/* Add Peer Link Management element; its length depends on whether
	 * the peer's link ID and a reason code are included */
	switch (action) {
	case PLINK_OPEN:
		ie_len = 3;
		break;
	case PLINK_CONFIRM:
		ie_len = 5;
		include_plid = true;
		break;
	case PLINK_CLOSE:
	default:
		if (!plid)
			ie_len = 5;
		else {
			ie_len = 7;
			include_plid = true;
		}
		break;
	}

	pos = skb_put(skb, 2 + ie_len);
	*pos++ = WLAN_EID_PEER_LINK;
	*pos++ = ie_len;
	*pos++ = action;
	memcpy(pos, &llid, 2);
	if (include_plid) {
		pos += 2;
		memcpy(pos, &plid, 2);
	}
	if (action == PLINK_CLOSE) {
		pos += 2;
		memcpy(pos, &reason, 2);
	}

	ieee80211_sta_tx(dev, skb, 0);
	return 0;
}
223
/* Called on reception of a beacon/probe from a mesh neighbour: create a
 * station entry if needed, refresh its activity timestamp and rates, and
 * optionally auto-initiate peer link establishment.
 */
void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct net_device *dev,
			   bool peer_accepting_plinks)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct sta_info *sta;

	rcu_read_lock();

	sta = sta_info_get(local, hw_addr);
	if (!sta) {
		/* Unknown neighbour: allocate and insert a station entry */
		sta = mesh_plink_alloc(sdata, hw_addr, rates);
		if (!sta) {
			rcu_read_unlock();
			return;
		}
		if (sta_info_insert(sta)) {
			rcu_read_unlock();
			return;
		}
	}

	sta->last_rx = jiffies;
	sta->supp_rates[local->hw.conf.channel->band] = rates;
	/* Auto-open a peer link only when both sides accept plinks and the
	 * local config enables automatic establishment */
	if (peer_accepting_plinks && sta->plink_state == PLINK_LISTEN &&
			sdata->u.sta.accepting_plinks &&
			sdata->u.sta.mshcfg.auto_open_plinks)
		mesh_plink_open(sta);

	rcu_read_unlock();
}
255
/* Peer link FSM timer: drives retries while opening, confirm timeouts, and
 * the holding-period expiry, depending on the link's current state.
 */
static void mesh_plink_timer(unsigned long data)
{
	struct sta_info *sta;
	__le16 llid, plid, reason;
	struct net_device *dev = NULL;
	struct ieee80211_sub_if_data *sdata;
#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
	DECLARE_MAC_BUF(mac);
#endif

	/*
	 * This STA is valid because sta_info_destroy() will
	 * del_timer_sync() this timer after having made sure
	 * it cannot be readded (by deleting the plink.)
	 */
	sta = (struct sta_info *) data;

	spin_lock_bh(&sta->plink_lock);
	if (sta->ignore_plink_timer) {
		/* A racing mod_timer failure asked us to skip this firing */
		sta->ignore_plink_timer = false;
		spin_unlock_bh(&sta->plink_lock);
		return;
	}
	mpl_dbg("Mesh plink timer for %s fired on state %d\n",
			print_mac(mac, sta->addr), sta->plink_state);
	reason = 0;
	llid = sta->llid;
	plid = sta->plid;
	sdata = sta->sdata;
	dev = sdata->dev;

	switch (sta->plink_state) {
	case PLINK_OPN_RCVD:
	case PLINK_OPN_SNT:
		/* retry timer */
		if (sta->plink_retries < dot11MeshMaxRetries(sdata)) {
			u32 rand;
			mpl_dbg("Mesh plink for %s (retry, timeout): %d %d\n",
					print_mac(mac, sta->addr),
					sta->plink_retries, sta->plink_timeout);
			/* Randomized backoff: grow the timeout by up to 2x */
			get_random_bytes(&rand, sizeof(u32));
			sta->plink_timeout = sta->plink_timeout +
					     rand % sta->plink_timeout;
			++sta->plink_retries;
			mod_plink_timer(sta, sta->plink_timeout);
			spin_unlock_bh(&sta->plink_lock);
			mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid,
					    0, 0);
			break;
		}
		reason = cpu_to_le16(MESH_MAX_RETRIES);
		/* fall through on else */
	case PLINK_CNF_RCVD:
		/* confirm timer */
		if (!reason)
			reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT);
		/* Give up: enter HOLDING and tell the peer to close */
		sta->plink_state = PLINK_HOLDING;
		mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
		spin_unlock_bh(&sta->plink_lock);
		mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, plid,
				    reason);
		break;
	case PLINK_HOLDING:
		/* holding timer */
		del_timer(&sta->plink_timer);
		mesh_plink_fsm_restart(sta);
		spin_unlock_bh(&sta->plink_lock);
		break;
	default:
		spin_unlock_bh(&sta->plink_lock);
		break;
	}
}
329
/* Arm the peer link timer to fire after @timeout milliseconds and record
 * the timeout for the retry/backoff logic in mesh_plink_timer(). */
static inline void mesh_plink_timer_set(struct sta_info *sta, int timeout)
{
	sta->plink_timer.expires = jiffies + (HZ * timeout / 1000);
	sta->plink_timer.data = (unsigned long) sta;
	sta->plink_timer.function = mesh_plink_timer;
	sta->plink_timeout = timeout;
	add_timer(&sta->plink_timer);
}
338
/* Initiate peer link establishment with @sta: pick a random local link ID,
 * move the FSM from LISTEN to OPN_SNT, arm the retry timer and transmit the
 * first PLINK_OPEN frame. Returns -EBUSY if the link is not in LISTEN state,
 * otherwise the result of the frame transmission.
 */
int mesh_plink_open(struct sta_info *sta)
{
	__le16 llid;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
	DECLARE_MAC_BUF(mac);
#endif

	spin_lock_bh(&sta->plink_lock);
	get_random_bytes(&llid, 2);
	sta->llid = llid;
	if (sta->plink_state != PLINK_LISTEN) {
		spin_unlock_bh(&sta->plink_lock);
		return -EBUSY;
	}
	sta->plink_state = PLINK_OPN_SNT;
	mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
	spin_unlock_bh(&sta->plink_lock);
	mpl_dbg("Mesh plink: starting establishment with %s\n",
		print_mac(mac, sta->addr));

	return mesh_plink_frame_tx(sdata->dev, PLINK_OPEN,
				   sta->addr, llid, 0, 0);
}
363
/* Deactivate the peer link and leave it in PLINK_BLOCKED so it will not be
 * re-established automatically. */
void mesh_plink_block(struct sta_info *sta)
{
#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
	DECLARE_MAC_BUF(mac);
#endif

	spin_lock_bh(&sta->plink_lock);
	__mesh_plink_deactivate(sta);
	/* NOTE(review): __mesh_plink_deactivate() already sets PLINK_BLOCKED;
	 * this assignment appears redundant but harmless. */
	sta->plink_state = PLINK_BLOCKED;
	spin_unlock_bh(&sta->plink_lock);
}
375
/* Gracefully close the peer link with @sta: deactivate it if established,
 * enter PLINK_HOLDING for the holding timeout, and send a PLINK_CLOSE frame
 * with reason MESH_LINK_CANCELLED. Links still in LISTEN/BLOCKED are simply
 * reset. Always returns 0.
 */
int mesh_plink_close(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	__le16 llid, plid, reason;
#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
	DECLARE_MAC_BUF(mac);
#endif

	mpl_dbg("Mesh plink: closing link with %s\n",
			print_mac(mac, sta->addr));
	spin_lock_bh(&sta->plink_lock);
	sta->reason = cpu_to_le16(MESH_LINK_CANCELLED);
	reason = sta->reason;

	if (sta->plink_state == PLINK_LISTEN ||
	    sta->plink_state == PLINK_BLOCKED) {
		/* Nothing negotiated yet: just reset the FSM, no frame sent */
		mesh_plink_fsm_restart(sta);
		spin_unlock_bh(&sta->plink_lock);
		return 0;
	} else if (sta->plink_state == PLINK_ESTAB) {
		__mesh_plink_deactivate(sta);
		/* The timer should not be running */
		mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
	} else if (!mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)))
		/* Timer already fired and is running its handler: tell it
		 * to ignore this (stale) expiry */
		sta->ignore_plink_timer = true;

	sta->plink_state = PLINK_HOLDING;
	llid = sta->llid;
	plid = sta->plid;
	spin_unlock_bh(&sta->plink_lock);
	mesh_plink_frame_tx(sta->sdata->dev, PLINK_CLOSE, sta->addr, llid,
			    plid, reason);
	return 0;
}
410
411void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
412 size_t len, struct ieee80211_rx_status *rx_status)
413{
414 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
415 struct ieee80211_local *local = sdata->local;
416 struct ieee802_11_elems elems;
417 struct sta_info *sta;
418 enum plink_event event;
419 enum plink_frame_type ftype;
420 size_t baselen;
421 u8 ie_len;
422 u8 *baseaddr;
423 __le16 plid, llid, reason;
424#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
425 DECLARE_MAC_BUF(mac);
426#endif
427
428 if (is_multicast_ether_addr(mgmt->da)) {
429 mpl_dbg("Mesh plink: ignore frame from multicast address");
430 return;
431 }
432
433 baseaddr = mgmt->u.action.u.plink_action.variable;
434 baselen = (u8 *) mgmt->u.action.u.plink_action.variable - (u8 *) mgmt;
435 if (mgmt->u.action.u.plink_action.action_code == PLINK_CONFIRM) {
436 baseaddr += 4;
437 baselen -= 4;
438 }
439 ieee802_11_parse_elems(baseaddr, len - baselen, &elems);
440 if (!elems.peer_link) {
441 mpl_dbg("Mesh plink: missing necessary peer link ie\n");
442 return;
443 }
444
445 ftype = *((u8 *)PLINK_GET_FRAME_SUBTYPE(elems.peer_link));
446 ie_len = elems.peer_link_len;
447 if ((ftype == PLINK_OPEN && ie_len != 3) ||
448 (ftype == PLINK_CONFIRM && ie_len != 5) ||
449 (ftype == PLINK_CLOSE && ie_len != 5 && ie_len != 7)) {
450 mpl_dbg("Mesh plink: incorrect plink ie length\n");
451 return;
452 }
453
454 if (ftype != PLINK_CLOSE && (!elems.mesh_id || !elems.mesh_config)) {
455 mpl_dbg("Mesh plink: missing necessary ie\n");
456 return;
457 }
458 /* Note the lines below are correct, the llid in the frame is the plid
459 * from the point of view of this host.
460 */
461 memcpy(&plid, PLINK_GET_LLID(elems.peer_link), 2);
462 if (ftype == PLINK_CONFIRM || (ftype == PLINK_CLOSE && ie_len == 7))
463 memcpy(&llid, PLINK_GET_PLID(elems.peer_link), 2);
464
465 rcu_read_lock();
466
467 sta = sta_info_get(local, mgmt->sa);
468 if (!sta && ftype != PLINK_OPEN) {
469 mpl_dbg("Mesh plink: cls or cnf from unknown peer\n");
470 rcu_read_unlock();
471 return;
472 }
473
474 if (sta && sta->plink_state == PLINK_BLOCKED) {
475 rcu_read_unlock();
476 return;
477 }
478
479 /* Now we will figure out the appropriate event... */
480 event = PLINK_UNDEFINED;
481 if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, dev))) {
482 switch (ftype) {
483 case PLINK_OPEN:
484 event = OPN_RJCT;
485 break;
486 case PLINK_CONFIRM:
487 event = CNF_RJCT;
488 break;
489 case PLINK_CLOSE:
490 /* avoid warning */
491 break;
492 }
493 spin_lock_bh(&sta->plink_lock);
494 } else if (!sta) {
495 /* ftype == PLINK_OPEN */
496 u64 rates;
497 if (!mesh_plink_free_count(sdata)) {
498 mpl_dbg("Mesh plink error: no more free plinks\n");
499 rcu_read_unlock();
500 return;
501 }
502
503 rates = ieee80211_sta_get_rates(local, &elems, rx_status->band);
504 sta = mesh_plink_alloc(sdata, mgmt->sa, rates);
505 if (!sta) {
506 mpl_dbg("Mesh plink error: plink table full\n");
507 rcu_read_unlock();
508 return;
509 }
510 if (sta_info_insert(sta)) {
511 rcu_read_unlock();
512 return;
513 }
514 event = OPN_ACPT;
515 spin_lock_bh(&sta->plink_lock);
516 } else {
517 spin_lock_bh(&sta->plink_lock);
518 switch (ftype) {
519 case PLINK_OPEN:
520 if (!mesh_plink_free_count(sdata) ||
521 (sta->plid && sta->plid != plid))
522 event = OPN_IGNR;
523 else
524 event = OPN_ACPT;
525 break;
526 case PLINK_CONFIRM:
527 if (!mesh_plink_free_count(sdata) ||
528 (sta->llid != llid || sta->plid != plid))
529 event = CNF_IGNR;
530 else
531 event = CNF_ACPT;
532 break;
533 case PLINK_CLOSE:
534 if (sta->plink_state == PLINK_ESTAB)
535 /* Do not check for llid or plid. This does not
536 * follow the standard but since multiple plinks
537 * per sta are not supported, it is necessary in
538 * order to avoid a livelock when MP A sees an
539 * establish peer link to MP B but MP B does not
540 * see it. This can be caused by a timeout in
541 * B's peer link establishment or B beign
542 * restarted.
543 */
544 event = CLS_ACPT;
545 else if (sta->plid != plid)
546 event = CLS_IGNR;
547 else if (ie_len == 7 && sta->llid != llid)
548 event = CLS_IGNR;
549 else
550 event = CLS_ACPT;
551 break;
552 default:
553 mpl_dbg("Mesh plink: unknown frame subtype\n");
554 spin_unlock_bh(&sta->plink_lock);
555 rcu_read_unlock();
556 return;
557 }
558 }
559
560 mpl_dbg("Mesh plink (peer, state, llid, plid, event): %s %d %d %d %d\n",
561 print_mac(mac, mgmt->sa), sta->plink_state,
562 le16_to_cpu(sta->llid), le16_to_cpu(sta->plid),
563 event);
564 reason = 0;
565 switch (sta->plink_state) {
566 /* spin_unlock as soon as state is updated at each case */
567 case PLINK_LISTEN:
568 switch (event) {
569 case CLS_ACPT:
570 mesh_plink_fsm_restart(sta);
571 spin_unlock_bh(&sta->plink_lock);
572 break;
573 case OPN_ACPT:
574 sta->plink_state = PLINK_OPN_RCVD;
575 sta->plid = plid;
576 get_random_bytes(&llid, 2);
577 sta->llid = llid;
578 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
579 spin_unlock_bh(&sta->plink_lock);
580 mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid,
581 0, 0);
582 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr,
583 llid, plid, 0);
584 break;
585 default:
586 spin_unlock_bh(&sta->plink_lock);
587 break;
588 }
589 break;
590
591 case PLINK_OPN_SNT:
592 switch (event) {
593 case OPN_RJCT:
594 case CNF_RJCT:
595 reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
596 case CLS_ACPT:
597 if (!reason)
598 reason = cpu_to_le16(MESH_CLOSE_RCVD);
599 sta->reason = reason;
600 sta->plink_state = PLINK_HOLDING;
601 if (!mod_plink_timer(sta,
602 dot11MeshHoldingTimeout(sdata)))
603 sta->ignore_plink_timer = true;
604
605 llid = sta->llid;
606 spin_unlock_bh(&sta->plink_lock);
607 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
608 plid, reason);
609 break;
610 case OPN_ACPT:
611 /* retry timer is left untouched */
612 sta->plink_state = PLINK_OPN_RCVD;
613 sta->plid = plid;
614 llid = sta->llid;
615 spin_unlock_bh(&sta->plink_lock);
616 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
617 plid, 0);
618 break;
619 case CNF_ACPT:
620 sta->plink_state = PLINK_CNF_RCVD;
621 if (!mod_plink_timer(sta,
622 dot11MeshConfirmTimeout(sdata)))
623 sta->ignore_plink_timer = true;
624
625 spin_unlock_bh(&sta->plink_lock);
626 break;
627 default:
628 spin_unlock_bh(&sta->plink_lock);
629 break;
630 }
631 break;
632
633 case PLINK_OPN_RCVD:
634 switch (event) {
635 case OPN_RJCT:
636 case CNF_RJCT:
637 reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
638 case CLS_ACPT:
639 if (!reason)
640 reason = cpu_to_le16(MESH_CLOSE_RCVD);
641 sta->reason = reason;
642 sta->plink_state = PLINK_HOLDING;
643 if (!mod_plink_timer(sta,
644 dot11MeshHoldingTimeout(sdata)))
645 sta->ignore_plink_timer = true;
646
647 llid = sta->llid;
648 spin_unlock_bh(&sta->plink_lock);
649 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
650 plid, reason);
651 break;
652 case OPN_ACPT:
653 llid = sta->llid;
654 spin_unlock_bh(&sta->plink_lock);
655 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
656 plid, 0);
657 break;
658 case CNF_ACPT:
659 del_timer(&sta->plink_timer);
660 sta->plink_state = PLINK_ESTAB;
661 mesh_plink_inc_estab_count(sdata);
662 spin_unlock_bh(&sta->plink_lock);
663 mpl_dbg("Mesh plink with %s ESTABLISHED\n",
664 print_mac(mac, sta->addr));
665 break;
666 default:
667 spin_unlock_bh(&sta->plink_lock);
668 break;
669 }
670 break;
671
672 case PLINK_CNF_RCVD:
673 switch (event) {
674 case OPN_RJCT:
675 case CNF_RJCT:
676 reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
677 case CLS_ACPT:
678 if (!reason)
679 reason = cpu_to_le16(MESH_CLOSE_RCVD);
680 sta->reason = reason;
681 sta->plink_state = PLINK_HOLDING;
682 if (!mod_plink_timer(sta,
683 dot11MeshHoldingTimeout(sdata)))
684 sta->ignore_plink_timer = true;
685
686 llid = sta->llid;
687 spin_unlock_bh(&sta->plink_lock);
688 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
689 plid, reason);
690 break;
691 case OPN_ACPT:
692 del_timer(&sta->plink_timer);
693 sta->plink_state = PLINK_ESTAB;
694 mesh_plink_inc_estab_count(sdata);
695 spin_unlock_bh(&sta->plink_lock);
696 mpl_dbg("Mesh plink with %s ESTABLISHED\n",
697 print_mac(mac, sta->addr));
698 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
699 plid, 0);
700 break;
701 default:
702 spin_unlock_bh(&sta->plink_lock);
703 break;
704 }
705 break;
706
707 case PLINK_ESTAB:
708 switch (event) {
709 case CLS_ACPT:
710 reason = cpu_to_le16(MESH_CLOSE_RCVD);
711 sta->reason = reason;
712 __mesh_plink_deactivate(sta);
713 sta->plink_state = PLINK_HOLDING;
714 llid = sta->llid;
715 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
716 spin_unlock_bh(&sta->plink_lock);
717 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
718 plid, reason);
719 break;
720 case OPN_ACPT:
721 llid = sta->llid;
722 spin_unlock_bh(&sta->plink_lock);
723 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
724 plid, 0);
725 break;
726 default:
727 spin_unlock_bh(&sta->plink_lock);
728 break;
729 }
730 break;
731 case PLINK_HOLDING:
732 switch (event) {
733 case CLS_ACPT:
734 if (del_timer(&sta->plink_timer))
735 sta->ignore_plink_timer = 1;
736 mesh_plink_fsm_restart(sta);
737 spin_unlock_bh(&sta->plink_lock);
738 break;
739 case OPN_ACPT:
740 case CNF_ACPT:
741 case OPN_RJCT:
742 case CNF_RJCT:
743 llid = sta->llid;
744 reason = sta->reason;
745 spin_unlock_bh(&sta->plink_lock);
746 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
747 plid, reason);
748 break;
749 default:
750 spin_unlock_bh(&sta->plink_lock);
751 }
752 break;
753 default:
754 /* should not get here, PLINK_BLOCKED is dealt with at the
755 * beggining of the function
756 */
757 spin_unlock_bh(&sta->plink_lock);
758 break;
759 }
760
761 rcu_read_unlock();
762}
diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/mlme.c
index c1706855460a..6b75cb6c6300 100644
--- a/net/mac80211/ieee80211_sta.c
+++ b/net/mac80211/mlme.c
@@ -24,19 +24,22 @@
24#include <linux/wireless.h> 24#include <linux/wireless.h>
25#include <linux/random.h> 25#include <linux/random.h>
26#include <linux/etherdevice.h> 26#include <linux/etherdevice.h>
27#include <linux/rtnetlink.h>
27#include <net/iw_handler.h> 28#include <net/iw_handler.h>
28#include <asm/types.h> 29#include <asm/types.h>
29 30
30#include <net/mac80211.h> 31#include <net/mac80211.h>
31#include "ieee80211_i.h" 32#include "ieee80211_i.h"
32#include "ieee80211_rate.h" 33#include "rate.h"
33#include "ieee80211_led.h" 34#include "led.h"
35#include "mesh.h"
34 36
35#define IEEE80211_AUTH_TIMEOUT (HZ / 5) 37#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
36#define IEEE80211_AUTH_MAX_TRIES 3 38#define IEEE80211_AUTH_MAX_TRIES 3
37#define IEEE80211_ASSOC_TIMEOUT (HZ / 5) 39#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
38#define IEEE80211_ASSOC_MAX_TRIES 3 40#define IEEE80211_ASSOC_MAX_TRIES 3
39#define IEEE80211_MONITORING_INTERVAL (2 * HZ) 41#define IEEE80211_MONITORING_INTERVAL (2 * HZ)
42#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
40#define IEEE80211_PROBE_INTERVAL (60 * HZ) 43#define IEEE80211_PROBE_INTERVAL (60 * HZ)
41#define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ) 44#define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ)
42#define IEEE80211_SCAN_INTERVAL (2 * HZ) 45#define IEEE80211_SCAN_INTERVAL (2 * HZ)
@@ -49,12 +52,11 @@
49#define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ) 52#define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ)
50#define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ) 53#define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ)
51#define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ) 54#define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ)
55#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
52 56
53#define IEEE80211_IBSS_MAX_STA_ENTRIES 128 57#define IEEE80211_IBSS_MAX_STA_ENTRIES 128
54 58
55 59
56#define IEEE80211_FC(type, stype) cpu_to_le16(type | stype)
57
58#define ERP_INFO_USE_PROTECTION BIT(1) 60#define ERP_INFO_USE_PROTECTION BIT(1)
59 61
60/* mgmt header + 1 byte action code */ 62/* mgmt header + 1 byte action code */
@@ -74,7 +76,7 @@
74static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst, 76static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst,
75 u8 *ssid, size_t ssid_len); 77 u8 *ssid, size_t ssid_len);
76static struct ieee80211_sta_bss * 78static struct ieee80211_sta_bss *
77ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int channel, 79ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq,
78 u8 *ssid, u8 ssid_len); 80 u8 *ssid, u8 ssid_len);
79static void ieee80211_rx_bss_put(struct net_device *dev, 81static void ieee80211_rx_bss_put(struct net_device *dev,
80 struct ieee80211_sta_bss *bss); 82 struct ieee80211_sta_bss *bss);
@@ -87,46 +89,8 @@ static int ieee80211_sta_config_auth(struct net_device *dev,
87 struct ieee80211_if_sta *ifsta); 89 struct ieee80211_if_sta *ifsta);
88 90
89 91
90/* Parsed Information Elements */ 92void ieee802_11_parse_elems(u8 *start, size_t len,
91struct ieee802_11_elems { 93 struct ieee802_11_elems *elems)
92 /* pointers to IEs */
93 u8 *ssid;
94 u8 *supp_rates;
95 u8 *fh_params;
96 u8 *ds_params;
97 u8 *cf_params;
98 u8 *tim;
99 u8 *ibss_params;
100 u8 *challenge;
101 u8 *wpa;
102 u8 *rsn;
103 u8 *erp_info;
104 u8 *ext_supp_rates;
105 u8 *wmm_info;
106 u8 *wmm_param;
107 u8 *ht_cap_elem;
108 u8 *ht_info_elem;
109 /* length of them, respectively */
110 u8 ssid_len;
111 u8 supp_rates_len;
112 u8 fh_params_len;
113 u8 ds_params_len;
114 u8 cf_params_len;
115 u8 tim_len;
116 u8 ibss_params_len;
117 u8 challenge_len;
118 u8 wpa_len;
119 u8 rsn_len;
120 u8 erp_info_len;
121 u8 ext_supp_rates_len;
122 u8 wmm_info_len;
123 u8 wmm_param_len;
124 u8 ht_cap_elem_len;
125 u8 ht_info_elem_len;
126};
127
128static void ieee802_11_parse_elems(u8 *start, size_t len,
129 struct ieee802_11_elems *elems)
130{ 94{
131 size_t left = len; 95 size_t left = len;
132 u8 *pos = start; 96 u8 *pos = start;
@@ -215,6 +179,30 @@ static void ieee802_11_parse_elems(u8 *start, size_t len,
215 elems->ht_info_elem = pos; 179 elems->ht_info_elem = pos;
216 elems->ht_info_elem_len = elen; 180 elems->ht_info_elem_len = elen;
217 break; 181 break;
182 case WLAN_EID_MESH_ID:
183 elems->mesh_id = pos;
184 elems->mesh_id_len = elen;
185 break;
186 case WLAN_EID_MESH_CONFIG:
187 elems->mesh_config = pos;
188 elems->mesh_config_len = elen;
189 break;
190 case WLAN_EID_PEER_LINK:
191 elems->peer_link = pos;
192 elems->peer_link_len = elen;
193 break;
194 case WLAN_EID_PREQ:
195 elems->preq = pos;
196 elems->preq_len = elen;
197 break;
198 case WLAN_EID_PREP:
199 elems->prep = pos;
200 elems->prep_len = elen;
201 break;
202 case WLAN_EID_PERR:
203 elems->perr = pos;
204 elems->perr_len = elen;
205 break;
218 default: 206 default:
219 break; 207 break;
220 } 208 }
@@ -227,12 +215,61 @@ static void ieee802_11_parse_elems(u8 *start, size_t len,
227 215
228static int ecw2cw(int ecw) 216static int ecw2cw(int ecw)
229{ 217{
230 int cw = 1; 218 return (1 << ecw) - 1;
231 while (ecw > 0) { 219}
232 cw <<= 1; 220
233 ecw--; 221
222static void ieee80211_sta_def_wmm_params(struct net_device *dev,
223 struct ieee80211_sta_bss *bss,
224 int ibss)
225{
226 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
227 struct ieee80211_local *local = sdata->local;
228 int i, have_higher_than_11mbit = 0;
229
230
231 /* cf. IEEE 802.11 9.2.12 */
232 for (i = 0; i < bss->supp_rates_len; i++)
233 if ((bss->supp_rates[i] & 0x7f) * 5 > 110)
234 have_higher_than_11mbit = 1;
235
236 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
237 have_higher_than_11mbit)
238 sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
239 else
240 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
241
242
243 if (local->ops->conf_tx) {
244 struct ieee80211_tx_queue_params qparam;
245
246 memset(&qparam, 0, sizeof(qparam));
247
248 qparam.aifs = 2;
249
250 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
251 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE))
252 qparam.cw_min = 31;
253 else
254 qparam.cw_min = 15;
255
256 qparam.cw_max = 1023;
257 qparam.txop = 0;
258
259 for (i = IEEE80211_TX_QUEUE_DATA0; i < NUM_TX_DATA_QUEUES; i++)
260 local->ops->conf_tx(local_to_hw(local),
261 i + IEEE80211_TX_QUEUE_DATA0,
262 &qparam);
263
264 if (ibss) {
265 /* IBSS uses different parameters for Beacon sending */
266 qparam.cw_min++;
267 qparam.cw_min *= 2;
268 qparam.cw_min--;
269 local->ops->conf_tx(local_to_hw(local),
270 IEEE80211_TX_QUEUE_BEACON, &qparam);
271 }
234 } 272 }
235 return cw - 1;
236} 273}
237 274
238static void ieee80211_sta_wmm_params(struct net_device *dev, 275static void ieee80211_sta_wmm_params(struct net_device *dev,
@@ -297,12 +334,13 @@ static void ieee80211_sta_wmm_params(struct net_device *dev,
297 params.aifs = pos[0] & 0x0f; 334 params.aifs = pos[0] & 0x0f;
298 params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4); 335 params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4);
299 params.cw_min = ecw2cw(pos[1] & 0x0f); 336 params.cw_min = ecw2cw(pos[1] & 0x0f);
300 /* TXOP is in units of 32 usec; burst_time in 0.1 ms */ 337 params.txop = pos[2] | (pos[3] << 8);
301 params.burst_time = (pos[2] | (pos[3] << 8)) * 32 / 100; 338#ifdef CONFIG_MAC80211_DEBUG
302 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " 339 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
303 "cWmin=%d cWmax=%d burst=%d\n", 340 "cWmin=%d cWmax=%d txop=%d\n",
304 dev->name, queue, aci, acm, params.aifs, params.cw_min, 341 dev->name, queue, aci, acm, params.aifs, params.cw_min,
305 params.cw_max, params.burst_time); 342 params.cw_max, params.txop);
343#endif
306 /* TODO: handle ACM (block TX, fallback to next lowest allowed 344 /* TODO: handle ACM (block TX, fallback to next lowest allowed
307 * AC for now) */ 345 * AC for now) */
308 if (local->ops->conf_tx(local_to_hw(local), queue, &params)) { 346 if (local->ops->conf_tx(local_to_hw(local), queue, &params)) {
@@ -477,6 +515,7 @@ static void ieee80211_set_associated(struct net_device *dev,
477{ 515{
478 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 516 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
479 struct ieee80211_local *local = sdata->local; 517 struct ieee80211_local *local = sdata->local;
518 struct ieee80211_conf *conf = &local_to_hw(local)->conf;
480 union iwreq_data wrqu; 519 union iwreq_data wrqu;
481 u32 changed = BSS_CHANGED_ASSOC; 520 u32 changed = BSS_CHANGED_ASSOC;
482 521
@@ -489,31 +528,49 @@ static void ieee80211_set_associated(struct net_device *dev,
489 return; 528 return;
490 529
491 bss = ieee80211_rx_bss_get(dev, ifsta->bssid, 530 bss = ieee80211_rx_bss_get(dev, ifsta->bssid,
492 local->hw.conf.channel, 531 conf->channel->center_freq,
493 ifsta->ssid, ifsta->ssid_len); 532 ifsta->ssid, ifsta->ssid_len);
494 if (bss) { 533 if (bss) {
534 /* set timing information */
535 sdata->bss_conf.beacon_int = bss->beacon_int;
536 sdata->bss_conf.timestamp = bss->timestamp;
537
495 changed |= ieee80211_handle_bss_capability(sdata, bss); 538 changed |= ieee80211_handle_bss_capability(sdata, bss);
539
496 ieee80211_rx_bss_put(dev, bss); 540 ieee80211_rx_bss_put(dev, bss);
497 } 541 }
498 542
543 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) {
544 changed |= BSS_CHANGED_HT;
545 sdata->bss_conf.assoc_ht = 1;
546 sdata->bss_conf.ht_conf = &conf->ht_conf;
547 sdata->bss_conf.ht_bss_conf = &conf->ht_bss_conf;
548 }
549
499 netif_carrier_on(dev); 550 netif_carrier_on(dev);
500 ifsta->flags |= IEEE80211_STA_PREV_BSSID_SET; 551 ifsta->flags |= IEEE80211_STA_PREV_BSSID_SET;
501 memcpy(ifsta->prev_bssid, sdata->u.sta.bssid, ETH_ALEN); 552 memcpy(ifsta->prev_bssid, sdata->u.sta.bssid, ETH_ALEN);
502 memcpy(wrqu.ap_addr.sa_data, sdata->u.sta.bssid, ETH_ALEN); 553 memcpy(wrqu.ap_addr.sa_data, sdata->u.sta.bssid, ETH_ALEN);
503 ieee80211_sta_send_associnfo(dev, ifsta); 554 ieee80211_sta_send_associnfo(dev, ifsta);
504 } else { 555 } else {
556 ieee80211_sta_tear_down_BA_sessions(dev, ifsta->bssid);
505 ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; 557 ifsta->flags &= ~IEEE80211_STA_ASSOCIATED;
506
507 netif_carrier_off(dev); 558 netif_carrier_off(dev);
508 ieee80211_reset_erp_info(dev); 559 ieee80211_reset_erp_info(dev);
560
561 sdata->bss_conf.assoc_ht = 0;
562 sdata->bss_conf.ht_conf = NULL;
563 sdata->bss_conf.ht_bss_conf = NULL;
564
509 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); 565 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
510 } 566 }
511 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
512 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
513 ifsta->last_probe = jiffies; 567 ifsta->last_probe = jiffies;
514 ieee80211_led_assoc(local, assoc); 568 ieee80211_led_assoc(local, assoc);
515 569
570 sdata->bss_conf.assoc = assoc;
516 ieee80211_bss_info_change_notify(sdata, changed); 571 ieee80211_bss_info_change_notify(sdata, changed);
572 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
573 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
517} 574}
518 575
519static void ieee80211_set_disassoc(struct net_device *dev, 576static void ieee80211_set_disassoc(struct net_device *dev,
@@ -525,8 +582,8 @@ static void ieee80211_set_disassoc(struct net_device *dev,
525 ieee80211_set_associated(dev, ifsta, 0); 582 ieee80211_set_associated(dev, ifsta, 0);
526} 583}
527 584
528static void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb, 585void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb,
529 int encrypt) 586 int encrypt)
530{ 587{
531 struct ieee80211_sub_if_data *sdata; 588 struct ieee80211_sub_if_data *sdata;
532 struct ieee80211_tx_packet_data *pkt_data; 589 struct ieee80211_tx_packet_data *pkt_data;
@@ -613,7 +670,6 @@ static void ieee80211_send_assoc(struct net_device *dev,
613 struct ieee80211_if_sta *ifsta) 670 struct ieee80211_if_sta *ifsta)
614{ 671{
615 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 672 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
616 struct ieee80211_hw_mode *mode;
617 struct sk_buff *skb; 673 struct sk_buff *skb;
618 struct ieee80211_mgmt *mgmt; 674 struct ieee80211_mgmt *mgmt;
619 u8 *pos, *ies; 675 u8 *pos, *ies;
@@ -621,6 +677,7 @@ static void ieee80211_send_assoc(struct net_device *dev,
621 u16 capab; 677 u16 capab;
622 struct ieee80211_sta_bss *bss; 678 struct ieee80211_sta_bss *bss;
623 int wmm = 0; 679 int wmm = 0;
680 struct ieee80211_supported_band *sband;
624 681
625 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 682 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
626 sizeof(*mgmt) + 200 + ifsta->extra_ie_len + 683 sizeof(*mgmt) + 200 + ifsta->extra_ie_len +
@@ -632,13 +689,19 @@ static void ieee80211_send_assoc(struct net_device *dev,
632 } 689 }
633 skb_reserve(skb, local->hw.extra_tx_headroom); 690 skb_reserve(skb, local->hw.extra_tx_headroom);
634 691
635 mode = local->oper_hw_mode; 692 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
693
636 capab = ifsta->capab; 694 capab = ifsta->capab;
637 if (mode->mode == MODE_IEEE80211G) { 695
638 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME | 696 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) {
639 WLAN_CAPABILITY_SHORT_PREAMBLE; 697 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
698 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
699 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
700 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
640 } 701 }
641 bss = ieee80211_rx_bss_get(dev, ifsta->bssid, local->hw.conf.channel, 702
703 bss = ieee80211_rx_bss_get(dev, ifsta->bssid,
704 local->hw.conf.channel->center_freq,
642 ifsta->ssid, ifsta->ssid_len); 705 ifsta->ssid, ifsta->ssid_len);
643 if (bss) { 706 if (bss) {
644 if (bss->capability & WLAN_CAPABILITY_PRIVACY) 707 if (bss->capability & WLAN_CAPABILITY_PRIVACY)
@@ -677,23 +740,23 @@ static void ieee80211_send_assoc(struct net_device *dev,
677 *pos++ = ifsta->ssid_len; 740 *pos++ = ifsta->ssid_len;
678 memcpy(pos, ifsta->ssid, ifsta->ssid_len); 741 memcpy(pos, ifsta->ssid, ifsta->ssid_len);
679 742
680 len = mode->num_rates; 743 len = sband->n_bitrates;
681 if (len > 8) 744 if (len > 8)
682 len = 8; 745 len = 8;
683 pos = skb_put(skb, len + 2); 746 pos = skb_put(skb, len + 2);
684 *pos++ = WLAN_EID_SUPP_RATES; 747 *pos++ = WLAN_EID_SUPP_RATES;
685 *pos++ = len; 748 *pos++ = len;
686 for (i = 0; i < len; i++) { 749 for (i = 0; i < len; i++) {
687 int rate = mode->rates[i].rate; 750 int rate = sband->bitrates[i].bitrate;
688 *pos++ = (u8) (rate / 5); 751 *pos++ = (u8) (rate / 5);
689 } 752 }
690 753
691 if (mode->num_rates > len) { 754 if (sband->n_bitrates > len) {
692 pos = skb_put(skb, mode->num_rates - len + 2); 755 pos = skb_put(skb, sband->n_bitrates - len + 2);
693 *pos++ = WLAN_EID_EXT_SUPP_RATES; 756 *pos++ = WLAN_EID_EXT_SUPP_RATES;
694 *pos++ = mode->num_rates - len; 757 *pos++ = sband->n_bitrates - len;
695 for (i = len; i < mode->num_rates; i++) { 758 for (i = len; i < sband->n_bitrates; i++) {
696 int rate = mode->rates[i].rate; 759 int rate = sband->bitrates[i].bitrate;
697 *pos++ = (u8) (rate / 5); 760 *pos++ = (u8) (rate / 5);
698 } 761 }
699 } 762 }
@@ -716,17 +779,18 @@ static void ieee80211_send_assoc(struct net_device *dev,
716 *pos++ = 0; 779 *pos++ = 0;
717 } 780 }
718 /* wmm support is a must to HT */ 781 /* wmm support is a must to HT */
719 if (wmm && mode->ht_info.ht_supported) { 782 if (wmm && sband->ht_info.ht_supported) {
720 __le16 tmp = cpu_to_le16(mode->ht_info.cap); 783 __le16 tmp = cpu_to_le16(sband->ht_info.cap);
721 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2); 784 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2);
722 *pos++ = WLAN_EID_HT_CAPABILITY; 785 *pos++ = WLAN_EID_HT_CAPABILITY;
723 *pos++ = sizeof(struct ieee80211_ht_cap); 786 *pos++ = sizeof(struct ieee80211_ht_cap);
724 memset(pos, 0, sizeof(struct ieee80211_ht_cap)); 787 memset(pos, 0, sizeof(struct ieee80211_ht_cap));
725 memcpy(pos, &tmp, sizeof(u16)); 788 memcpy(pos, &tmp, sizeof(u16));
726 pos += sizeof(u16); 789 pos += sizeof(u16);
727 *pos++ = (mode->ht_info.ampdu_factor | 790 /* TODO: needs a define here for << 2 */
728 (mode->ht_info.ampdu_density << 2)); 791 *pos++ = sband->ht_info.ampdu_factor |
729 memcpy(pos, mode->ht_info.supp_mcs_set, 16); 792 (sband->ht_info.ampdu_density << 2);
793 memcpy(pos, sband->ht_info.supp_mcs_set, 16);
730 } 794 }
731 795
732 kfree(ifsta->assocreq_ies); 796 kfree(ifsta->assocreq_ies);
@@ -809,7 +873,8 @@ static int ieee80211_privacy_mismatch(struct net_device *dev,
809 if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL)) 873 if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL))
810 return 0; 874 return 0;
811 875
812 bss = ieee80211_rx_bss_get(dev, ifsta->bssid, local->hw.conf.channel, 876 bss = ieee80211_rx_bss_get(dev, ifsta->bssid,
877 local->hw.conf.channel->center_freq,
813 ifsta->ssid, ifsta->ssid_len); 878 ifsta->ssid, ifsta->ssid_len);
814 if (!bss) 879 if (!bss)
815 return 0; 880 return 0;
@@ -872,6 +937,8 @@ static void ieee80211_associated(struct net_device *dev,
872 937
873 ifsta->state = IEEE80211_ASSOCIATED; 938 ifsta->state = IEEE80211_ASSOCIATED;
874 939
940 rcu_read_lock();
941
875 sta = sta_info_get(local, ifsta->bssid); 942 sta = sta_info_get(local, ifsta->bssid);
876 if (!sta) { 943 if (!sta) {
877 printk(KERN_DEBUG "%s: No STA entry for own AP %s\n", 944 printk(KERN_DEBUG "%s: No STA entry for own AP %s\n",
@@ -887,7 +954,7 @@ static void ieee80211_associated(struct net_device *dev,
887 "range\n", 954 "range\n",
888 dev->name, print_mac(mac, ifsta->bssid)); 955 dev->name, print_mac(mac, ifsta->bssid));
889 disassoc = 1; 956 disassoc = 1;
890 sta_info_free(sta); 957 sta_info_unlink(&sta);
891 } else 958 } else
892 ieee80211_send_probe_req(dev, ifsta->bssid, 959 ieee80211_send_probe_req(dev, ifsta->bssid,
893 local->scan_ssid, 960 local->scan_ssid,
@@ -903,8 +970,13 @@ static void ieee80211_associated(struct net_device *dev,
903 ifsta->ssid_len); 970 ifsta->ssid_len);
904 } 971 }
905 } 972 }
906 sta_info_put(sta);
907 } 973 }
974
975 rcu_read_unlock();
976
977 if (disassoc && sta)
978 sta_info_destroy(sta);
979
908 if (disassoc) { 980 if (disassoc) {
909 ifsta->state = IEEE80211_DISABLED; 981 ifsta->state = IEEE80211_DISABLED;
910 ieee80211_set_associated(dev, ifsta, 0); 982 ieee80211_set_associated(dev, ifsta, 0);
@@ -919,7 +991,7 @@ static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst,
919 u8 *ssid, size_t ssid_len) 991 u8 *ssid, size_t ssid_len)
920{ 992{
921 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 993 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
922 struct ieee80211_hw_mode *mode; 994 struct ieee80211_supported_band *sband;
923 struct sk_buff *skb; 995 struct sk_buff *skb;
924 struct ieee80211_mgmt *mgmt; 996 struct ieee80211_mgmt *mgmt;
925 u8 *pos, *supp_rates, *esupp_rates = NULL; 997 u8 *pos, *supp_rates, *esupp_rates = NULL;
@@ -953,11 +1025,10 @@ static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst,
953 supp_rates = skb_put(skb, 2); 1025 supp_rates = skb_put(skb, 2);
954 supp_rates[0] = WLAN_EID_SUPP_RATES; 1026 supp_rates[0] = WLAN_EID_SUPP_RATES;
955 supp_rates[1] = 0; 1027 supp_rates[1] = 0;
956 mode = local->oper_hw_mode; 1028 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
957 for (i = 0; i < mode->num_rates; i++) { 1029
958 struct ieee80211_rate *rate = &mode->rates[i]; 1030 for (i = 0; i < sband->n_bitrates; i++) {
959 if (!(rate->flags & IEEE80211_RATE_SUPPORTED)) 1031 struct ieee80211_rate *rate = &sband->bitrates[i];
960 continue;
961 if (esupp_rates) { 1032 if (esupp_rates) {
962 pos = skb_put(skb, 1); 1033 pos = skb_put(skb, 1);
963 esupp_rates[1]++; 1034 esupp_rates[1]++;
@@ -970,7 +1041,7 @@ static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst,
970 pos = skb_put(skb, 1); 1041 pos = skb_put(skb, 1);
971 supp_rates[1]++; 1042 supp_rates[1]++;
972 } 1043 }
973 *pos = rate->rate / 5; 1044 *pos = rate->bitrate / 5;
974 } 1045 }
975 1046
976 ieee80211_sta_tx(dev, skb, 0); 1047 ieee80211_sta_tx(dev, skb, 0);
@@ -1065,6 +1136,58 @@ static void ieee80211_send_addba_resp(struct net_device *dev, u8 *da, u16 tid,
1065 return; 1136 return;
1066} 1137}
1067 1138
1139void ieee80211_send_addba_request(struct net_device *dev, const u8 *da,
1140 u16 tid, u8 dialog_token, u16 start_seq_num,
1141 u16 agg_size, u16 timeout)
1142{
1143 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1144 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1145 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
1146 struct sk_buff *skb;
1147 struct ieee80211_mgmt *mgmt;
1148 u16 capab;
1149
1150 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom + 1 +
1151 sizeof(mgmt->u.action.u.addba_req));
1152
1153
1154 if (!skb) {
1155 printk(KERN_ERR "%s: failed to allocate buffer "
1156 "for addba request frame\n", dev->name);
1157 return;
1158 }
1159 skb_reserve(skb, local->hw.extra_tx_headroom);
1160 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
1161 memset(mgmt, 0, 24);
1162 memcpy(mgmt->da, da, ETH_ALEN);
1163 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
1164 if (sdata->vif.type == IEEE80211_IF_TYPE_AP)
1165 memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN);
1166 else
1167 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
1168
1169 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
1170 IEEE80211_STYPE_ACTION);
1171
1172 skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));
1173
1174 mgmt->u.action.category = WLAN_CATEGORY_BACK;
1175 mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;
1176
1177 mgmt->u.action.u.addba_req.dialog_token = dialog_token;
1178 capab = (u16)(1 << 1); /* bit 1 aggregation policy */
1179 capab |= (u16)(tid << 2); /* bit 5:2 TID number */
1180 capab |= (u16)(agg_size << 6); /* bit 15:6 max size of aggergation */
1181
1182 mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);
1183
1184 mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
1185 mgmt->u.action.u.addba_req.start_seq_num =
1186 cpu_to_le16(start_seq_num << 4);
1187
1188 ieee80211_sta_tx(dev, skb, 0);
1189}
1190
1068static void ieee80211_sta_process_addba_request(struct net_device *dev, 1191static void ieee80211_sta_process_addba_request(struct net_device *dev,
1069 struct ieee80211_mgmt *mgmt, 1192 struct ieee80211_mgmt *mgmt,
1070 size_t len) 1193 size_t len)
@@ -1079,9 +1202,13 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
1079 int ret = -EOPNOTSUPP; 1202 int ret = -EOPNOTSUPP;
1080 DECLARE_MAC_BUF(mac); 1203 DECLARE_MAC_BUF(mac);
1081 1204
1205 rcu_read_lock();
1206
1082 sta = sta_info_get(local, mgmt->sa); 1207 sta = sta_info_get(local, mgmt->sa);
1083 if (!sta) 1208 if (!sta) {
1209 rcu_read_unlock();
1084 return; 1210 return;
1211 }
1085 1212
1086 /* extract session parameters from addba request frame */ 1213 /* extract session parameters from addba request frame */
1087 dialog_token = mgmt->u.action.u.addba_req.dialog_token; 1214 dialog_token = mgmt->u.action.u.addba_req.dialog_token;
@@ -1105,7 +1232,7 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
1105 status = WLAN_STATUS_INVALID_QOS_PARAM; 1232 status = WLAN_STATUS_INVALID_QOS_PARAM;
1106#ifdef CONFIG_MAC80211_HT_DEBUG 1233#ifdef CONFIG_MAC80211_HT_DEBUG
1107 if (net_ratelimit()) 1234 if (net_ratelimit())
1108 printk(KERN_DEBUG "Block Ack Req with bad params from " 1235 printk(KERN_DEBUG "AddBA Req with bad params from "
1109 "%s on tid %u. policy %d, buffer size %d\n", 1236 "%s on tid %u. policy %d, buffer size %d\n",
1110 print_mac(mac, mgmt->sa), tid, ba_policy, 1237 print_mac(mac, mgmt->sa), tid, ba_policy,
1111 buf_size); 1238 buf_size);
@@ -1114,26 +1241,45 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
1114 } 1241 }
1115 /* determine default buffer size */ 1242 /* determine default buffer size */
1116 if (buf_size == 0) { 1243 if (buf_size == 0) {
1117 struct ieee80211_hw_mode *mode = conf->mode; 1244 struct ieee80211_supported_band *sband;
1245
1246 sband = local->hw.wiphy->bands[conf->channel->band];
1118 buf_size = IEEE80211_MIN_AMPDU_BUF; 1247 buf_size = IEEE80211_MIN_AMPDU_BUF;
1119 buf_size = buf_size << mode->ht_info.ampdu_factor; 1248 buf_size = buf_size << sband->ht_info.ampdu_factor;
1120 } 1249 }
1121 1250
1122 tid_agg_rx = &sta->ampdu_mlme.tid_rx[tid];
1123 1251
1124 /* examine state machine */ 1252 /* examine state machine */
1125 spin_lock_bh(&sta->ampdu_mlme.ampdu_rx); 1253 spin_lock_bh(&sta->ampdu_mlme.ampdu_rx);
1126 1254
1127 if (tid_agg_rx->state != HT_AGG_STATE_IDLE) { 1255 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) {
1128#ifdef CONFIG_MAC80211_HT_DEBUG 1256#ifdef CONFIG_MAC80211_HT_DEBUG
1129 if (net_ratelimit()) 1257 if (net_ratelimit())
1130 printk(KERN_DEBUG "unexpected Block Ack Req from " 1258 printk(KERN_DEBUG "unexpected AddBA Req from "
1131 "%s on tid %u\n", 1259 "%s on tid %u\n",
1132 print_mac(mac, mgmt->sa), tid); 1260 print_mac(mac, mgmt->sa), tid);
1133#endif /* CONFIG_MAC80211_HT_DEBUG */ 1261#endif /* CONFIG_MAC80211_HT_DEBUG */
1134 goto end; 1262 goto end;
1135 } 1263 }
1136 1264
1265 /* prepare A-MPDU MLME for Rx aggregation */
1266 sta->ampdu_mlme.tid_rx[tid] =
1267 kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
1268 if (!sta->ampdu_mlme.tid_rx[tid]) {
1269 if (net_ratelimit())
1270 printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
1271 tid);
1272 goto end;
1273 }
1274 /* rx timer */
1275 sta->ampdu_mlme.tid_rx[tid]->session_timer.function =
1276 sta_rx_agg_session_timer_expired;
1277 sta->ampdu_mlme.tid_rx[tid]->session_timer.data =
1278 (unsigned long)&sta->timer_to_tid[tid];
1279 init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
1280
1281 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
1282
1137 /* prepare reordering buffer */ 1283 /* prepare reordering buffer */
1138 tid_agg_rx->reorder_buf = 1284 tid_agg_rx->reorder_buf =
1139 kmalloc(buf_size * sizeof(struct sk_buf *), GFP_ATOMIC); 1285 kmalloc(buf_size * sizeof(struct sk_buf *), GFP_ATOMIC);
@@ -1141,6 +1287,7 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
1141 if (net_ratelimit()) 1287 if (net_ratelimit())
1142 printk(KERN_ERR "can not allocate reordering buffer " 1288 printk(KERN_ERR "can not allocate reordering buffer "
1143 "to tid %d\n", tid); 1289 "to tid %d\n", tid);
1290 kfree(sta->ampdu_mlme.tid_rx[tid]);
1144 goto end; 1291 goto end;
1145 } 1292 }
1146 memset(tid_agg_rx->reorder_buf, 0, 1293 memset(tid_agg_rx->reorder_buf, 0,
@@ -1148,18 +1295,20 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
1148 1295
1149 if (local->ops->ampdu_action) 1296 if (local->ops->ampdu_action)
1150 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START, 1297 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START,
1151 sta->addr, tid, start_seq_num); 1298 sta->addr, tid, &start_seq_num);
1152#ifdef CONFIG_MAC80211_HT_DEBUG 1299#ifdef CONFIG_MAC80211_HT_DEBUG
1153 printk(KERN_DEBUG "Rx A-MPDU on tid %d result %d", tid, ret); 1300 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
1154#endif /* CONFIG_MAC80211_HT_DEBUG */ 1301#endif /* CONFIG_MAC80211_HT_DEBUG */
1155 1302
1156 if (ret) { 1303 if (ret) {
1157 kfree(tid_agg_rx->reorder_buf); 1304 kfree(tid_agg_rx->reorder_buf);
1305 kfree(tid_agg_rx);
1306 sta->ampdu_mlme.tid_rx[tid] = NULL;
1158 goto end; 1307 goto end;
1159 } 1308 }
1160 1309
1161 /* change state and send addba resp */ 1310 /* change state and send addba resp */
1162 tid_agg_rx->state = HT_AGG_STATE_OPERATIONAL; 1311 sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL;
1163 tid_agg_rx->dialog_token = dialog_token; 1312 tid_agg_rx->dialog_token = dialog_token;
1164 tid_agg_rx->ssn = start_seq_num; 1313 tid_agg_rx->ssn = start_seq_num;
1165 tid_agg_rx->head_seq_num = start_seq_num; 1314 tid_agg_rx->head_seq_num = start_seq_num;
@@ -1171,13 +1320,89 @@ end:
1171 spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); 1320 spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx);
1172 1321
1173end_no_lock: 1322end_no_lock:
1174 ieee80211_send_addba_resp(sta->dev, sta->addr, tid, dialog_token, 1323 ieee80211_send_addba_resp(sta->sdata->dev, sta->addr, tid,
1175 status, 1, buf_size, timeout); 1324 dialog_token, status, 1, buf_size, timeout);
1176 sta_info_put(sta); 1325 rcu_read_unlock();
1177} 1326}
1178 1327
1179static void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid, 1328static void ieee80211_sta_process_addba_resp(struct net_device *dev,
1180 u16 initiator, u16 reason_code) 1329 struct ieee80211_mgmt *mgmt,
1330 size_t len)
1331{
1332 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1333 struct ieee80211_hw *hw = &local->hw;
1334 struct sta_info *sta;
1335 u16 capab;
1336 u16 tid;
1337 u8 *state;
1338
1339 rcu_read_lock();
1340
1341 sta = sta_info_get(local, mgmt->sa);
1342 if (!sta) {
1343 rcu_read_unlock();
1344 return;
1345 }
1346
1347 capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
1348 tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
1349
1350 state = &sta->ampdu_mlme.tid_state_tx[tid];
1351
1352 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
1353
1354 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
1355 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
1356 printk(KERN_DEBUG "state not HT_ADDBA_REQUESTED_MSK:"
1357 "%d\n", *state);
1358 goto addba_resp_exit;
1359 }
1360
1361 if (mgmt->u.action.u.addba_resp.dialog_token !=
1362 sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
1363 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
1364#ifdef CONFIG_MAC80211_HT_DEBUG
1365 printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
1366#endif /* CONFIG_MAC80211_HT_DEBUG */
1367 goto addba_resp_exit;
1368 }
1369
1370 del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
1371#ifdef CONFIG_MAC80211_HT_DEBUG
1372 printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
1373#endif /* CONFIG_MAC80211_HT_DEBUG */
1374 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
1375 == WLAN_STATUS_SUCCESS) {
1376 if (*state & HT_ADDBA_RECEIVED_MSK)
1377 printk(KERN_DEBUG "double addBA response\n");
1378
1379 *state |= HT_ADDBA_RECEIVED_MSK;
1380 sta->ampdu_mlme.addba_req_num[tid] = 0;
1381
1382 if (*state == HT_AGG_STATE_OPERATIONAL) {
1383 printk(KERN_DEBUG "Aggregation on for tid %d \n", tid);
1384 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
1385 }
1386
1387 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
1388 printk(KERN_DEBUG "recipient accepted agg: tid %d \n", tid);
1389 } else {
1390 printk(KERN_DEBUG "recipient rejected agg: tid %d \n", tid);
1391
1392 sta->ampdu_mlme.addba_req_num[tid]++;
1393 /* this will allow the state check in stop_BA_session */
1394 *state = HT_AGG_STATE_OPERATIONAL;
1395 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
1396 ieee80211_stop_tx_ba_session(hw, sta->addr, tid,
1397 WLAN_BACK_INITIATOR);
1398 }
1399
1400addba_resp_exit:
1401 rcu_read_unlock();
1402}
1403
1404void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid,
1405 u16 initiator, u16 reason_code)
1181{ 1406{
1182 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1407 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1183 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1408 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -1227,58 +1452,71 @@ void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid,
1227 struct ieee80211_hw *hw = &local->hw; 1452 struct ieee80211_hw *hw = &local->hw;
1228 struct sta_info *sta; 1453 struct sta_info *sta;
1229 int ret, i; 1454 int ret, i;
1455 DECLARE_MAC_BUF(mac);
1456
1457 rcu_read_lock();
1230 1458
1231 sta = sta_info_get(local, ra); 1459 sta = sta_info_get(local, ra);
1232 if (!sta) 1460 if (!sta) {
1461 rcu_read_unlock();
1233 return; 1462 return;
1463 }
1234 1464
1235 /* check if TID is in operational state */ 1465 /* check if TID is in operational state */
1236 spin_lock_bh(&sta->ampdu_mlme.ampdu_rx); 1466 spin_lock_bh(&sta->ampdu_mlme.ampdu_rx);
1237 if (sta->ampdu_mlme.tid_rx[tid].state 1467 if (sta->ampdu_mlme.tid_state_rx[tid]
1238 != HT_AGG_STATE_OPERATIONAL) { 1468 != HT_AGG_STATE_OPERATIONAL) {
1239 spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); 1469 spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx);
1240 sta_info_put(sta); 1470 rcu_read_unlock();
1241 return; 1471 return;
1242 } 1472 }
1243 sta->ampdu_mlme.tid_rx[tid].state = 1473 sta->ampdu_mlme.tid_state_rx[tid] =
1244 HT_AGG_STATE_REQ_STOP_BA_MSK | 1474 HT_AGG_STATE_REQ_STOP_BA_MSK |
1245 (initiator << HT_AGG_STATE_INITIATOR_SHIFT); 1475 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
1246 spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); 1476 spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx);
1247 1477
1248 /* stop HW Rx aggregation. ampdu_action existence 1478 /* stop HW Rx aggregation. ampdu_action existence
1249 * already verified in session init so we add the BUG_ON */ 1479 * already verified in session init so we add the BUG_ON */
1250 BUG_ON(!local->ops->ampdu_action); 1480 BUG_ON(!local->ops->ampdu_action);
1251 1481
1482#ifdef CONFIG_MAC80211_HT_DEBUG
1483 printk(KERN_DEBUG "Rx BA session stop requested for %s tid %u\n",
1484 print_mac(mac, ra), tid);
1485#endif /* CONFIG_MAC80211_HT_DEBUG */
1486
1252 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP, 1487 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP,
1253 ra, tid, EINVAL); 1488 ra, tid, NULL);
1254 if (ret) 1489 if (ret)
1255 printk(KERN_DEBUG "HW problem - can not stop rx " 1490 printk(KERN_DEBUG "HW problem - can not stop rx "
1256 "aggergation for tid %d\n", tid); 1491 "aggergation for tid %d\n", tid);
1257 1492
1258 /* shutdown timer has not expired */ 1493 /* shutdown timer has not expired */
1259 if (initiator != WLAN_BACK_TIMER) 1494 if (initiator != WLAN_BACK_TIMER)
1260 del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]. 1495 del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
1261 session_timer);
1262 1496
1263 /* check if this is a self generated aggregation halt */ 1497 /* check if this is a self generated aggregation halt */
1264 if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER) 1498 if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER)
1265 ieee80211_send_delba(dev, ra, tid, 0, reason); 1499 ieee80211_send_delba(dev, ra, tid, 0, reason);
1266 1500
1267 /* free the reordering buffer */ 1501 /* free the reordering buffer */
1268 for (i = 0; i < sta->ampdu_mlme.tid_rx[tid].buf_size; i++) { 1502 for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) {
1269 if (sta->ampdu_mlme.tid_rx[tid].reorder_buf[i]) { 1503 if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) {
1270 /* release the reordered frames */ 1504 /* release the reordered frames */
1271 dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid].reorder_buf[i]); 1505 dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]);
1272 sta->ampdu_mlme.tid_rx[tid].stored_mpdu_num--; 1506 sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--;
1273 sta->ampdu_mlme.tid_rx[tid].reorder_buf[i] = NULL; 1507 sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL;
1274 } 1508 }
1275 } 1509 }
1276 kfree(sta->ampdu_mlme.tid_rx[tid].reorder_buf); 1510 /* free resources */
1511 kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf);
1512 kfree(sta->ampdu_mlme.tid_rx[tid]);
1513 sta->ampdu_mlme.tid_rx[tid] = NULL;
1514 sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE;
1277 1515
1278 sta->ampdu_mlme.tid_rx[tid].state = HT_AGG_STATE_IDLE; 1516 rcu_read_unlock();
1279 sta_info_put(sta);
1280} 1517}
1281 1518
1519
1282static void ieee80211_sta_process_delba(struct net_device *dev, 1520static void ieee80211_sta_process_delba(struct net_device *dev,
1283 struct ieee80211_mgmt *mgmt, size_t len) 1521 struct ieee80211_mgmt *mgmt, size_t len)
1284{ 1522{
@@ -1288,9 +1526,13 @@ static void ieee80211_sta_process_delba(struct net_device *dev,
1288 u16 initiator; 1526 u16 initiator;
1289 DECLARE_MAC_BUF(mac); 1527 DECLARE_MAC_BUF(mac);
1290 1528
1529 rcu_read_lock();
1530
1291 sta = sta_info_get(local, mgmt->sa); 1531 sta = sta_info_get(local, mgmt->sa);
1292 if (!sta) 1532 if (!sta) {
1533 rcu_read_unlock();
1293 return; 1534 return;
1535 }
1294 1536
1295 params = le16_to_cpu(mgmt->u.action.u.delba.params); 1537 params = le16_to_cpu(mgmt->u.action.u.delba.params);
1296 tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12; 1538 tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12;
@@ -1298,27 +1540,87 @@ static void ieee80211_sta_process_delba(struct net_device *dev,
1298 1540
1299#ifdef CONFIG_MAC80211_HT_DEBUG 1541#ifdef CONFIG_MAC80211_HT_DEBUG
1300 if (net_ratelimit()) 1542 if (net_ratelimit())
1301 printk(KERN_DEBUG "delba from %s on tid %d reason code %d\n", 1543 printk(KERN_DEBUG "delba from %s (%s) tid %d reason code %d\n",
1302 print_mac(mac, mgmt->sa), tid, 1544 print_mac(mac, mgmt->sa),
1545 initiator ? "initiator" : "recipient", tid,
1303 mgmt->u.action.u.delba.reason_code); 1546 mgmt->u.action.u.delba.reason_code);
1304#endif /* CONFIG_MAC80211_HT_DEBUG */ 1547#endif /* CONFIG_MAC80211_HT_DEBUG */
1305 1548
1306 if (initiator == WLAN_BACK_INITIATOR) 1549 if (initiator == WLAN_BACK_INITIATOR)
1307 ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid, 1550 ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid,
1308 WLAN_BACK_INITIATOR, 0); 1551 WLAN_BACK_INITIATOR, 0);
1309 sta_info_put(sta); 1552 else { /* WLAN_BACK_RECIPIENT */
1553 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
1554 sta->ampdu_mlme.tid_state_tx[tid] =
1555 HT_AGG_STATE_OPERATIONAL;
1556 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
1557 ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid,
1558 WLAN_BACK_RECIPIENT);
1559 }
1560 rcu_read_unlock();
1310} 1561}
1311 1562
1312/* 1563/*
1313 * After receiving Block Ack Request (BAR) we activated a 1564 * After sending add Block Ack request we activated a timer until
1314 * timer after each frame arrives from the originator. 1565 * add Block Ack response will arrive from the recipient.
1566 * If this timer expires sta_addba_resp_timer_expired will be executed.
1567 */
1568void sta_addba_resp_timer_expired(unsigned long data)
1569{
1570 /* not an elegant detour, but there is no choice as the timer passes
1571 * only one argument, and both sta_info and TID are needed, so init
1572 * flow in sta_info_create gives the TID as data, while the timer_to_id
1573 * array gives the sta through container_of */
1574 u16 tid = *(int *)data;
1575 struct sta_info *temp_sta = container_of((void *)data,
1576 struct sta_info, timer_to_tid[tid]);
1577
1578 struct ieee80211_local *local = temp_sta->local;
1579 struct ieee80211_hw *hw = &local->hw;
1580 struct sta_info *sta;
1581 u8 *state;
1582
1583 rcu_read_lock();
1584
1585 sta = sta_info_get(local, temp_sta->addr);
1586 if (!sta) {
1587 rcu_read_unlock();
1588 return;
1589 }
1590
1591 state = &sta->ampdu_mlme.tid_state_tx[tid];
1592 /* check if the TID waits for addBA response */
1593 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
1594 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
1595 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
1596 *state = HT_AGG_STATE_IDLE;
1597 printk(KERN_DEBUG "timer expired on tid %d but we are not "
1598 "expecting addBA response there", tid);
1599 goto timer_expired_exit;
1600 }
1601
1602 printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
1603
1604 /* go through the state check in stop_BA_session */
1605 *state = HT_AGG_STATE_OPERATIONAL;
1606 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
1607 ieee80211_stop_tx_ba_session(hw, temp_sta->addr, tid,
1608 WLAN_BACK_INITIATOR);
1609
1610timer_expired_exit:
1611 rcu_read_unlock();
1612}
1613
1614/*
1615 * After accepting the AddBA Request we activated a timer,
1616 * resetting it after each frame that arrives from the originator.
1315 * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed. 1617 * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed.
1316 */ 1618 */
1317void sta_rx_agg_session_timer_expired(unsigned long data) 1619void sta_rx_agg_session_timer_expired(unsigned long data)
1318{ 1620{
1319 /* not an elegant detour, but there is no choice as the timer passes 1621 /* not an elegant detour, but there is no choice as the timer passes
1320 * only one argument, and verious sta_info are needed here, so init 1622 * only one argument, and verious sta_info are needed here, so init
1321 * flow in sta_info_add gives the TID as data, while the timer_to_id 1623 * flow in sta_info_create gives the TID as data, while the timer_to_id
1322 * array gives the sta through container_of */ 1624 * array gives the sta through container_of */
1323 u8 *ptid = (u8 *)data; 1625 u8 *ptid = (u8 *)data;
1324 u8 *timer_to_id = ptid - *ptid; 1626 u8 *timer_to_id = ptid - *ptid;
@@ -1326,11 +1628,24 @@ void sta_rx_agg_session_timer_expired(unsigned long data)
1326 timer_to_tid[0]); 1628 timer_to_tid[0]);
1327 1629
1328 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); 1630 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
1329 ieee80211_sta_stop_rx_ba_session(sta->dev, sta->addr, (u16)*ptid, 1631 ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr,
1330 WLAN_BACK_TIMER, 1632 (u16)*ptid, WLAN_BACK_TIMER,
1331 WLAN_REASON_QSTA_TIMEOUT); 1633 WLAN_REASON_QSTA_TIMEOUT);
1332} 1634}
1333 1635
1636void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr)
1637{
1638 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1639 int i;
1640
1641 for (i = 0; i < STA_TID_NUM; i++) {
1642 ieee80211_stop_tx_ba_session(&local->hw, addr, i,
1643 WLAN_BACK_INITIATOR);
1644 ieee80211_sta_stop_rx_ba_session(dev, addr, i,
1645 WLAN_BACK_RECIPIENT,
1646 WLAN_REASON_QSTA_LEAVE_QBSS);
1647 }
1648}
1334 1649
1335static void ieee80211_rx_mgmt_auth(struct net_device *dev, 1650static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1336 struct ieee80211_if_sta *ifsta, 1651 struct ieee80211_if_sta *ifsta,
@@ -1557,15 +1872,16 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1557{ 1872{
1558 struct ieee80211_local *local = sdata->local; 1873 struct ieee80211_local *local = sdata->local;
1559 struct net_device *dev = sdata->dev; 1874 struct net_device *dev = sdata->dev;
1560 struct ieee80211_hw_mode *mode; 1875 struct ieee80211_supported_band *sband;
1561 struct sta_info *sta; 1876 struct sta_info *sta;
1562 u32 rates; 1877 u64 rates, basic_rates;
1563 u16 capab_info, status_code, aid; 1878 u16 capab_info, status_code, aid;
1564 struct ieee802_11_elems elems; 1879 struct ieee802_11_elems elems;
1565 struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf; 1880 struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf;
1566 u8 *pos; 1881 u8 *pos;
1567 int i, j; 1882 int i, j;
1568 DECLARE_MAC_BUF(mac); 1883 DECLARE_MAC_BUF(mac);
1884 bool have_higher_than_11mbit = false;
1569 1885
1570 /* AssocResp and ReassocResp have identical structure, so process both 1886 /* AssocResp and ReassocResp have identical structure, so process both
1571 * of them in this function. */ 1887 * of them in this function. */
@@ -1635,22 +1951,23 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1635 if (ifsta->assocresp_ies) 1951 if (ifsta->assocresp_ies)
1636 memcpy(ifsta->assocresp_ies, pos, ifsta->assocresp_ies_len); 1952 memcpy(ifsta->assocresp_ies, pos, ifsta->assocresp_ies_len);
1637 1953
1638 /* set AID, ieee80211_set_associated() will tell the driver */ 1954 rcu_read_lock();
1639 bss_conf->aid = aid;
1640 ieee80211_set_associated(dev, ifsta, 1);
1641 1955
1642 /* Add STA entry for the AP */ 1956 /* Add STA entry for the AP */
1643 sta = sta_info_get(local, ifsta->bssid); 1957 sta = sta_info_get(local, ifsta->bssid);
1644 if (!sta) { 1958 if (!sta) {
1645 struct ieee80211_sta_bss *bss; 1959 struct ieee80211_sta_bss *bss;
1646 sta = sta_info_add(local, dev, ifsta->bssid, GFP_KERNEL); 1960 int err;
1961
1962 sta = sta_info_alloc(sdata, ifsta->bssid, GFP_ATOMIC);
1647 if (!sta) { 1963 if (!sta) {
1648 printk(KERN_DEBUG "%s: failed to add STA entry for the" 1964 printk(KERN_DEBUG "%s: failed to alloc STA entry for"
1649 " AP\n", dev->name); 1965 " the AP\n", dev->name);
1966 rcu_read_unlock();
1650 return; 1967 return;
1651 } 1968 }
1652 bss = ieee80211_rx_bss_get(dev, ifsta->bssid, 1969 bss = ieee80211_rx_bss_get(dev, ifsta->bssid,
1653 local->hw.conf.channel, 1970 local->hw.conf.channel->center_freq,
1654 ifsta->ssid, ifsta->ssid_len); 1971 ifsta->ssid, ifsta->ssid_len);
1655 if (bss) { 1972 if (bss) {
1656 sta->last_rssi = bss->rssi; 1973 sta->last_rssi = bss->rssi;
@@ -1658,50 +1975,97 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1658 sta->last_noise = bss->noise; 1975 sta->last_noise = bss->noise;
1659 ieee80211_rx_bss_put(dev, bss); 1976 ieee80211_rx_bss_put(dev, bss);
1660 } 1977 }
1978
1979 err = sta_info_insert(sta);
1980 if (err) {
1981 printk(KERN_DEBUG "%s: failed to insert STA entry for"
1982 " the AP (error %d)\n", dev->name, err);
1983 rcu_read_unlock();
1984 return;
1985 }
1661 } 1986 }
1662 1987
1663 sta->dev = dev; 1988 /*
1664 sta->flags |= WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP; 1989 * FIXME: Do we really need to update the sta_info's information here?
1990 * We already know about the AP (we found it in our list) so it
1991 * should already be filled with the right info, no?
1992 * As is stands, all this is racy because typically we assume
1993 * the information that is filled in here (except flags) doesn't
1994 * change while a STA structure is alive. As such, it should move
1995 * to between the sta_info_alloc() and sta_info_insert() above.
1996 */
1997
1998 sta->flags |= WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP |
1999 WLAN_STA_AUTHORIZED;
1665 2000
1666 rates = 0; 2001 rates = 0;
1667 mode = local->oper_hw_mode; 2002 basic_rates = 0;
2003 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2004
1668 for (i = 0; i < elems.supp_rates_len; i++) { 2005 for (i = 0; i < elems.supp_rates_len; i++) {
1669 int rate = (elems.supp_rates[i] & 0x7f) * 5; 2006 int rate = (elems.supp_rates[i] & 0x7f) * 5;
1670 for (j = 0; j < mode->num_rates; j++) 2007
1671 if (mode->rates[j].rate == rate) 2008 if (rate > 110)
2009 have_higher_than_11mbit = true;
2010
2011 for (j = 0; j < sband->n_bitrates; j++) {
2012 if (sband->bitrates[j].bitrate == rate)
1672 rates |= BIT(j); 2013 rates |= BIT(j);
2014 if (elems.supp_rates[i] & 0x80)
2015 basic_rates |= BIT(j);
2016 }
1673 } 2017 }
2018
1674 for (i = 0; i < elems.ext_supp_rates_len; i++) { 2019 for (i = 0; i < elems.ext_supp_rates_len; i++) {
1675 int rate = (elems.ext_supp_rates[i] & 0x7f) * 5; 2020 int rate = (elems.ext_supp_rates[i] & 0x7f) * 5;
1676 for (j = 0; j < mode->num_rates; j++) 2021
1677 if (mode->rates[j].rate == rate) 2022 if (rate > 110)
2023 have_higher_than_11mbit = true;
2024
2025 for (j = 0; j < sband->n_bitrates; j++) {
2026 if (sband->bitrates[j].bitrate == rate)
1678 rates |= BIT(j); 2027 rates |= BIT(j);
2028 if (elems.ext_supp_rates[i] & 0x80)
2029 basic_rates |= BIT(j);
2030 }
1679 } 2031 }
1680 sta->supp_rates = rates;
1681 2032
1682 if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param && 2033 sta->supp_rates[local->hw.conf.channel->band] = rates;
1683 local->ops->conf_ht) { 2034 sdata->basic_rates = basic_rates;
1684 struct ieee80211_ht_bss_info bss_info;
1685 2035
2036 /* cf. IEEE 802.11 9.2.12 */
2037 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
2038 have_higher_than_11mbit)
2039 sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
2040 else
2041 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
2042
2043 if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param) {
2044 struct ieee80211_ht_bss_info bss_info;
1686 ieee80211_ht_cap_ie_to_ht_info( 2045 ieee80211_ht_cap_ie_to_ht_info(
1687 (struct ieee80211_ht_cap *) 2046 (struct ieee80211_ht_cap *)
1688 elems.ht_cap_elem, &sta->ht_info); 2047 elems.ht_cap_elem, &sta->ht_info);
1689 ieee80211_ht_addt_info_ie_to_ht_bss_info( 2048 ieee80211_ht_addt_info_ie_to_ht_bss_info(
1690 (struct ieee80211_ht_addt_info *) 2049 (struct ieee80211_ht_addt_info *)
1691 elems.ht_info_elem, &bss_info); 2050 elems.ht_info_elem, &bss_info);
1692 ieee80211_hw_config_ht(local, 1, &sta->ht_info, &bss_info); 2051 ieee80211_handle_ht(local, 1, &sta->ht_info, &bss_info);
1693 } 2052 }
1694 2053
1695 rate_control_rate_init(sta, local); 2054 rate_control_rate_init(sta, local);
1696 2055
1697 if (elems.wmm_param && (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) { 2056 if (elems.wmm_param && (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) {
1698 sta->flags |= WLAN_STA_WME; 2057 sta->flags |= WLAN_STA_WME;
2058 rcu_read_unlock();
1699 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, 2059 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param,
1700 elems.wmm_param_len); 2060 elems.wmm_param_len);
1701 } 2061 } else
1702 2062 rcu_read_unlock();
1703 2063
1704 sta_info_put(sta); 2064 /* set AID and assoc capability,
2065 * ieee80211_set_associated() will tell the driver */
2066 bss_conf->aid = aid;
2067 bss_conf->assoc_capability = capab_info;
2068 ieee80211_set_associated(dev, ifsta, 1);
1705 2069
1706 ieee80211_associated(dev, ifsta); 2070 ieee80211_associated(dev, ifsta);
1707} 2071}
@@ -1712,8 +2076,16 @@ static void __ieee80211_rx_bss_hash_add(struct net_device *dev,
1712 struct ieee80211_sta_bss *bss) 2076 struct ieee80211_sta_bss *bss)
1713{ 2077{
1714 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2078 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1715 bss->hnext = local->sta_bss_hash[STA_HASH(bss->bssid)]; 2079 u8 hash_idx;
1716 local->sta_bss_hash[STA_HASH(bss->bssid)] = bss; 2080
2081 if (bss_mesh_cfg(bss))
2082 hash_idx = mesh_id_hash(bss_mesh_id(bss),
2083 bss_mesh_id_len(bss));
2084 else
2085 hash_idx = STA_HASH(bss->bssid);
2086
2087 bss->hnext = local->sta_bss_hash[hash_idx];
2088 local->sta_bss_hash[hash_idx] = bss;
1717} 2089}
1718 2090
1719 2091
@@ -1740,7 +2112,7 @@ static void __ieee80211_rx_bss_hash_del(struct net_device *dev,
1740 2112
1741 2113
1742static struct ieee80211_sta_bss * 2114static struct ieee80211_sta_bss *
1743ieee80211_rx_bss_add(struct net_device *dev, u8 *bssid, int channel, 2115ieee80211_rx_bss_add(struct net_device *dev, u8 *bssid, int freq,
1744 u8 *ssid, u8 ssid_len) 2116 u8 *ssid, u8 ssid_len)
1745{ 2117{
1746 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2118 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
@@ -1752,7 +2124,7 @@ ieee80211_rx_bss_add(struct net_device *dev, u8 *bssid, int channel,
1752 atomic_inc(&bss->users); 2124 atomic_inc(&bss->users);
1753 atomic_inc(&bss->users); 2125 atomic_inc(&bss->users);
1754 memcpy(bss->bssid, bssid, ETH_ALEN); 2126 memcpy(bss->bssid, bssid, ETH_ALEN);
1755 bss->channel = channel; 2127 bss->freq = freq;
1756 if (ssid && ssid_len <= IEEE80211_MAX_SSID_LEN) { 2128 if (ssid && ssid_len <= IEEE80211_MAX_SSID_LEN) {
1757 memcpy(bss->ssid, ssid, ssid_len); 2129 memcpy(bss->ssid, ssid, ssid_len);
1758 bss->ssid_len = ssid_len; 2130 bss->ssid_len = ssid_len;
@@ -1766,9 +2138,8 @@ ieee80211_rx_bss_add(struct net_device *dev, u8 *bssid, int channel,
1766 return bss; 2138 return bss;
1767} 2139}
1768 2140
1769
1770static struct ieee80211_sta_bss * 2141static struct ieee80211_sta_bss *
1771ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int channel, 2142ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq,
1772 u8 *ssid, u8 ssid_len) 2143 u8 *ssid, u8 ssid_len)
1773{ 2144{
1774 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2145 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
@@ -1777,8 +2148,9 @@ ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int channel,
1777 spin_lock_bh(&local->sta_bss_lock); 2148 spin_lock_bh(&local->sta_bss_lock);
1778 bss = local->sta_bss_hash[STA_HASH(bssid)]; 2149 bss = local->sta_bss_hash[STA_HASH(bssid)];
1779 while (bss) { 2150 while (bss) {
1780 if (!memcmp(bss->bssid, bssid, ETH_ALEN) && 2151 if (!bss_mesh_cfg(bss) &&
1781 bss->channel == channel && 2152 !memcmp(bss->bssid, bssid, ETH_ALEN) &&
2153 bss->freq == freq &&
1782 bss->ssid_len == ssid_len && 2154 bss->ssid_len == ssid_len &&
1783 (ssid_len == 0 || !memcmp(bss->ssid, ssid, ssid_len))) { 2155 (ssid_len == 0 || !memcmp(bss->ssid, ssid, ssid_len))) {
1784 atomic_inc(&bss->users); 2156 atomic_inc(&bss->users);
@@ -1790,6 +2162,75 @@ ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int channel,
1790 return bss; 2162 return bss;
1791} 2163}
1792 2164
2165#ifdef CONFIG_MAC80211_MESH
2166static struct ieee80211_sta_bss *
2167ieee80211_rx_mesh_bss_get(struct net_device *dev, u8 *mesh_id, int mesh_id_len,
2168 u8 *mesh_cfg, int freq)
2169{
2170 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2171 struct ieee80211_sta_bss *bss;
2172
2173 spin_lock_bh(&local->sta_bss_lock);
2174 bss = local->sta_bss_hash[mesh_id_hash(mesh_id, mesh_id_len)];
2175 while (bss) {
2176 if (bss_mesh_cfg(bss) &&
2177 !memcmp(bss_mesh_cfg(bss), mesh_cfg, MESH_CFG_CMP_LEN) &&
2178 bss->freq == freq &&
2179 mesh_id_len == bss->mesh_id_len &&
2180 (mesh_id_len == 0 || !memcmp(bss->mesh_id, mesh_id,
2181 mesh_id_len))) {
2182 atomic_inc(&bss->users);
2183 break;
2184 }
2185 bss = bss->hnext;
2186 }
2187 spin_unlock_bh(&local->sta_bss_lock);
2188 return bss;
2189}
2190
2191static struct ieee80211_sta_bss *
2192ieee80211_rx_mesh_bss_add(struct net_device *dev, u8 *mesh_id, int mesh_id_len,
2193 u8 *mesh_cfg, int mesh_config_len, int freq)
2194{
2195 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2196 struct ieee80211_sta_bss *bss;
2197
2198 if (mesh_config_len != MESH_CFG_LEN)
2199 return NULL;
2200
2201 bss = kzalloc(sizeof(*bss), GFP_ATOMIC);
2202 if (!bss)
2203 return NULL;
2204
2205 bss->mesh_cfg = kmalloc(MESH_CFG_CMP_LEN, GFP_ATOMIC);
2206 if (!bss->mesh_cfg) {
2207 kfree(bss);
2208 return NULL;
2209 }
2210
2211 if (mesh_id_len && mesh_id_len <= IEEE80211_MAX_MESH_ID_LEN) {
2212 bss->mesh_id = kmalloc(mesh_id_len, GFP_ATOMIC);
2213 if (!bss->mesh_id) {
2214 kfree(bss->mesh_cfg);
2215 kfree(bss);
2216 return NULL;
2217 }
2218 memcpy(bss->mesh_id, mesh_id, mesh_id_len);
2219 }
2220
2221 atomic_inc(&bss->users);
2222 atomic_inc(&bss->users);
2223 memcpy(bss->mesh_cfg, mesh_cfg, MESH_CFG_CMP_LEN);
2224 bss->mesh_id_len = mesh_id_len;
2225 bss->freq = freq;
2226 spin_lock_bh(&local->sta_bss_lock);
2227 /* TODO: order by RSSI? */
2228 list_add_tail(&bss->list, &local->sta_bss_list);
2229 __ieee80211_rx_bss_hash_add(dev, bss);
2230 spin_unlock_bh(&local->sta_bss_lock);
2231 return bss;
2232}
2233#endif
1793 2234
1794static void ieee80211_rx_bss_free(struct ieee80211_sta_bss *bss) 2235static void ieee80211_rx_bss_free(struct ieee80211_sta_bss *bss)
1795{ 2236{
@@ -1797,6 +2238,8 @@ static void ieee80211_rx_bss_free(struct ieee80211_sta_bss *bss)
1797 kfree(bss->rsn_ie); 2238 kfree(bss->rsn_ie);
1798 kfree(bss->wmm_ie); 2239 kfree(bss->wmm_ie);
1799 kfree(bss->ht_ie); 2240 kfree(bss->ht_ie);
2241 kfree(bss_mesh_id(bss));
2242 kfree(bss_mesh_cfg(bss));
1800 kfree(bss); 2243 kfree(bss);
1801} 2244}
1802 2245
@@ -1834,6 +2277,204 @@ void ieee80211_rx_bss_list_deinit(struct net_device *dev)
1834} 2277}
1835 2278
1836 2279
2280static int ieee80211_sta_join_ibss(struct net_device *dev,
2281 struct ieee80211_if_sta *ifsta,
2282 struct ieee80211_sta_bss *bss)
2283{
2284 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2285 int res, rates, i, j;
2286 struct sk_buff *skb;
2287 struct ieee80211_mgmt *mgmt;
2288 struct ieee80211_tx_control control;
2289 struct rate_selection ratesel;
2290 u8 *pos;
2291 struct ieee80211_sub_if_data *sdata;
2292 struct ieee80211_supported_band *sband;
2293
2294 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2295
2296 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2297
2298 /* Remove possible STA entries from other IBSS networks. */
2299 sta_info_flush_delayed(sdata);
2300
2301 if (local->ops->reset_tsf) {
2302 /* Reset own TSF to allow time synchronization work. */
2303 local->ops->reset_tsf(local_to_hw(local));
2304 }
2305 memcpy(ifsta->bssid, bss->bssid, ETH_ALEN);
2306 res = ieee80211_if_config(dev);
2307 if (res)
2308 return res;
2309
2310 local->hw.conf.beacon_int = bss->beacon_int >= 10 ? bss->beacon_int : 10;
2311
2312 sdata->drop_unencrypted = bss->capability &
2313 WLAN_CAPABILITY_PRIVACY ? 1 : 0;
2314
2315 res = ieee80211_set_freq(local, bss->freq);
2316
2317 if (local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS) {
2318 printk(KERN_DEBUG "%s: IBSS not allowed on frequency "
2319 "%d MHz\n", dev->name, local->oper_channel->center_freq);
2320 return -1;
2321 }
2322
2323 /* Set beacon template */
2324 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
2325 do {
2326 if (!skb)
2327 break;
2328
2329 skb_reserve(skb, local->hw.extra_tx_headroom);
2330
2331 mgmt = (struct ieee80211_mgmt *)
2332 skb_put(skb, 24 + sizeof(mgmt->u.beacon));
2333 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
2334 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
2335 IEEE80211_STYPE_BEACON);
2336 memset(mgmt->da, 0xff, ETH_ALEN);
2337 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
2338 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
2339 mgmt->u.beacon.beacon_int =
2340 cpu_to_le16(local->hw.conf.beacon_int);
2341 mgmt->u.beacon.capab_info = cpu_to_le16(bss->capability);
2342
2343 pos = skb_put(skb, 2 + ifsta->ssid_len);
2344 *pos++ = WLAN_EID_SSID;
2345 *pos++ = ifsta->ssid_len;
2346 memcpy(pos, ifsta->ssid, ifsta->ssid_len);
2347
2348 rates = bss->supp_rates_len;
2349 if (rates > 8)
2350 rates = 8;
2351 pos = skb_put(skb, 2 + rates);
2352 *pos++ = WLAN_EID_SUPP_RATES;
2353 *pos++ = rates;
2354 memcpy(pos, bss->supp_rates, rates);
2355
2356 if (bss->band == IEEE80211_BAND_2GHZ) {
2357 pos = skb_put(skb, 2 + 1);
2358 *pos++ = WLAN_EID_DS_PARAMS;
2359 *pos++ = 1;
2360 *pos++ = ieee80211_frequency_to_channel(bss->freq);
2361 }
2362
2363 pos = skb_put(skb, 2 + 2);
2364 *pos++ = WLAN_EID_IBSS_PARAMS;
2365 *pos++ = 2;
2366 /* FIX: set ATIM window based on scan results */
2367 *pos++ = 0;
2368 *pos++ = 0;
2369
2370 if (bss->supp_rates_len > 8) {
2371 rates = bss->supp_rates_len - 8;
2372 pos = skb_put(skb, 2 + rates);
2373 *pos++ = WLAN_EID_EXT_SUPP_RATES;
2374 *pos++ = rates;
2375 memcpy(pos, &bss->supp_rates[8], rates);
2376 }
2377
2378 memset(&control, 0, sizeof(control));
2379 rate_control_get_rate(dev, sband, skb, &ratesel);
2380 if (!ratesel.rate) {
2381 printk(KERN_DEBUG "%s: Failed to determine TX rate "
2382 "for IBSS beacon\n", dev->name);
2383 break;
2384 }
2385 control.vif = &sdata->vif;
2386 control.tx_rate = ratesel.rate;
2387 if (sdata->bss_conf.use_short_preamble &&
2388 ratesel.rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)
2389 control.flags |= IEEE80211_TXCTL_SHORT_PREAMBLE;
2390 control.antenna_sel_tx = local->hw.conf.antenna_sel_tx;
2391 control.flags |= IEEE80211_TXCTL_NO_ACK;
2392 control.retry_limit = 1;
2393
2394 ifsta->probe_resp = skb_copy(skb, GFP_ATOMIC);
2395 if (ifsta->probe_resp) {
2396 mgmt = (struct ieee80211_mgmt *)
2397 ifsta->probe_resp->data;
2398 mgmt->frame_control =
2399 IEEE80211_FC(IEEE80211_FTYPE_MGMT,
2400 IEEE80211_STYPE_PROBE_RESP);
2401 } else {
2402 printk(KERN_DEBUG "%s: Could not allocate ProbeResp "
2403 "template for IBSS\n", dev->name);
2404 }
2405
2406 if (local->ops->beacon_update &&
2407 local->ops->beacon_update(local_to_hw(local),
2408 skb, &control) == 0) {
2409 printk(KERN_DEBUG "%s: Configured IBSS beacon "
2410 "template\n", dev->name);
2411 skb = NULL;
2412 }
2413
2414 rates = 0;
2415 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2416 for (i = 0; i < bss->supp_rates_len; i++) {
2417 int bitrate = (bss->supp_rates[i] & 0x7f) * 5;
2418 for (j = 0; j < sband->n_bitrates; j++)
2419 if (sband->bitrates[j].bitrate == bitrate)
2420 rates |= BIT(j);
2421 }
2422 ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates;
2423
2424 ieee80211_sta_def_wmm_params(dev, bss, 1);
2425 } while (0);
2426
2427 if (skb) {
2428 printk(KERN_DEBUG "%s: Failed to configure IBSS beacon "
2429 "template\n", dev->name);
2430 dev_kfree_skb(skb);
2431 }
2432
2433 ifsta->state = IEEE80211_IBSS_JOINED;
2434 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL);
2435
2436 ieee80211_rx_bss_put(dev, bss);
2437
2438 return res;
2439}
2440
2441u64 ieee80211_sta_get_rates(struct ieee80211_local *local,
2442 struct ieee802_11_elems *elems,
2443 enum ieee80211_band band)
2444{
2445 struct ieee80211_supported_band *sband;
2446 struct ieee80211_rate *bitrates;
2447 size_t num_rates;
2448 u64 supp_rates;
2449 int i, j;
2450 sband = local->hw.wiphy->bands[band];
2451
2452 if (!sband) {
2453 WARN_ON(1);
2454 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2455 }
2456
2457 bitrates = sband->bitrates;
2458 num_rates = sband->n_bitrates;
2459 supp_rates = 0;
2460 for (i = 0; i < elems->supp_rates_len +
2461 elems->ext_supp_rates_len; i++) {
2462 u8 rate = 0;
2463 int own_rate;
2464 if (i < elems->supp_rates_len)
2465 rate = elems->supp_rates[i];
2466 else if (elems->ext_supp_rates)
2467 rate = elems->ext_supp_rates
2468 [i - elems->supp_rates_len];
2469 own_rate = 5 * (rate & 0x7f);
2470 for (j = 0; j < num_rates; j++)
2471 if (bitrates[j].bitrate == own_rate)
2472 supp_rates |= BIT(j);
2473 }
2474 return supp_rates;
2475}
2476
2477
1837static void ieee80211_rx_bss_info(struct net_device *dev, 2478static void ieee80211_rx_bss_info(struct net_device *dev,
1838 struct ieee80211_mgmt *mgmt, 2479 struct ieee80211_mgmt *mgmt,
1839 size_t len, 2480 size_t len,
@@ -1843,11 +2484,12 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
1843 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2484 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1844 struct ieee802_11_elems elems; 2485 struct ieee802_11_elems elems;
1845 size_t baselen; 2486 size_t baselen;
1846 int channel, clen; 2487 int freq, clen;
1847 struct ieee80211_sta_bss *bss; 2488 struct ieee80211_sta_bss *bss;
1848 struct sta_info *sta; 2489 struct sta_info *sta;
1849 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2490 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1850 u64 timestamp; 2491 u64 beacon_timestamp, rx_timestamp;
2492 struct ieee80211_channel *channel;
1851 DECLARE_MAC_BUF(mac); 2493 DECLARE_MAC_BUF(mac);
1852 DECLARE_MAC_BUF(mac2); 2494 DECLARE_MAC_BUF(mac2);
1853 2495
@@ -1864,104 +2506,77 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
1864 if (baselen > len) 2506 if (baselen > len)
1865 return; 2507 return;
1866 2508
1867 timestamp = le64_to_cpu(mgmt->u.beacon.timestamp); 2509 beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp);
2510 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
1868 2511
1869 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && beacon && 2512 if (ieee80211_vif_is_mesh(&sdata->vif) && elems.mesh_id &&
1870 memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0) { 2513 elems.mesh_config && mesh_matches_local(&elems, dev)) {
1871#ifdef CONFIG_MAC80211_IBSS_DEBUG 2514 u64 rates = ieee80211_sta_get_rates(local, &elems,
1872 static unsigned long last_tsf_debug = 0; 2515 rx_status->band);
1873 u64 tsf; 2516
1874 if (local->ops->get_tsf) 2517 mesh_neighbour_update(mgmt->sa, rates, dev,
1875 tsf = local->ops->get_tsf(local_to_hw(local)); 2518 mesh_peer_accepts_plinks(&elems, dev));
1876 else
1877 tsf = -1LLU;
1878 if (time_after(jiffies, last_tsf_debug + 5 * HZ)) {
1879 printk(KERN_DEBUG "RX beacon SA=%s BSSID="
1880 "%s TSF=0x%llx BCN=0x%llx diff=%lld "
1881 "@%lu\n",
1882 print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->bssid),
1883 (unsigned long long)tsf,
1884 (unsigned long long)timestamp,
1885 (unsigned long long)(tsf - timestamp),
1886 jiffies);
1887 last_tsf_debug = jiffies;
1888 }
1889#endif /* CONFIG_MAC80211_IBSS_DEBUG */
1890 } 2519 }
1891 2520
1892 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); 2521 rcu_read_lock();
1893 2522
1894 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems.supp_rates && 2523 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems.supp_rates &&
1895 memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 && 2524 memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 &&
1896 (sta = sta_info_get(local, mgmt->sa))) { 2525 (sta = sta_info_get(local, mgmt->sa))) {
1897 struct ieee80211_hw_mode *mode; 2526 u64 prev_rates;
1898 struct ieee80211_rate *rates; 2527 u64 supp_rates = ieee80211_sta_get_rates(local, &elems,
1899 size_t num_rates; 2528 rx_status->band);
1900 u32 supp_rates, prev_rates;
1901 int i, j;
1902
1903 mode = local->sta_sw_scanning ?
1904 local->scan_hw_mode : local->oper_hw_mode;
1905
1906 if (local->sta_hw_scanning) {
1907 /* search for the correct mode matches the beacon */
1908 list_for_each_entry(mode, &local->modes_list, list)
1909 if (mode->mode == rx_status->phymode)
1910 break;
1911
1912 if (mode == NULL)
1913 mode = local->oper_hw_mode;
1914 }
1915 rates = mode->rates;
1916 num_rates = mode->num_rates;
1917
1918 supp_rates = 0;
1919 for (i = 0; i < elems.supp_rates_len +
1920 elems.ext_supp_rates_len; i++) {
1921 u8 rate = 0;
1922 int own_rate;
1923 if (i < elems.supp_rates_len)
1924 rate = elems.supp_rates[i];
1925 else if (elems.ext_supp_rates)
1926 rate = elems.ext_supp_rates
1927 [i - elems.supp_rates_len];
1928 own_rate = 5 * (rate & 0x7f);
1929 for (j = 0; j < num_rates; j++)
1930 if (rates[j].rate == own_rate)
1931 supp_rates |= BIT(j);
1932 }
1933 2529
1934 prev_rates = sta->supp_rates; 2530 prev_rates = sta->supp_rates[rx_status->band];
1935 sta->supp_rates &= supp_rates; 2531 sta->supp_rates[rx_status->band] &= supp_rates;
1936 if (sta->supp_rates == 0) { 2532 if (sta->supp_rates[rx_status->band] == 0) {
1937 /* No matching rates - this should not really happen. 2533 /* No matching rates - this should not really happen.
1938 * Make sure that at least one rate is marked 2534 * Make sure that at least one rate is marked
1939 * supported to avoid issues with TX rate ctrl. */ 2535 * supported to avoid issues with TX rate ctrl. */
1940 sta->supp_rates = sdata->u.sta.supp_rates_bits; 2536 sta->supp_rates[rx_status->band] =
2537 sdata->u.sta.supp_rates_bits[rx_status->band];
1941 } 2538 }
1942 if (sta->supp_rates != prev_rates) { 2539 if (sta->supp_rates[rx_status->band] != prev_rates) {
1943 printk(KERN_DEBUG "%s: updated supp_rates set for " 2540 printk(KERN_DEBUG "%s: updated supp_rates set for "
1944 "%s based on beacon info (0x%x & 0x%x -> " 2541 "%s based on beacon info (0x%llx & 0x%llx -> "
1945 "0x%x)\n", 2542 "0x%llx)\n",
1946 dev->name, print_mac(mac, sta->addr), prev_rates, 2543 dev->name, print_mac(mac, sta->addr),
1947 supp_rates, sta->supp_rates); 2544 (unsigned long long) prev_rates,
2545 (unsigned long long) supp_rates,
2546 (unsigned long long) sta->supp_rates[rx_status->band]);
1948 } 2547 }
1949 sta_info_put(sta);
1950 } 2548 }
1951 2549
1952 if (!elems.ssid) 2550 rcu_read_unlock();
1953 return;
1954 2551
1955 if (elems.ds_params && elems.ds_params_len == 1) 2552 if (elems.ds_params && elems.ds_params_len == 1)
1956 channel = elems.ds_params[0]; 2553 freq = ieee80211_channel_to_frequency(elems.ds_params[0]);
1957 else 2554 else
1958 channel = rx_status->channel; 2555 freq = rx_status->freq;
1959 2556
1960 bss = ieee80211_rx_bss_get(dev, mgmt->bssid, channel, 2557 channel = ieee80211_get_channel(local->hw.wiphy, freq);
1961 elems.ssid, elems.ssid_len); 2558
1962 if (!bss) { 2559 if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
1963 bss = ieee80211_rx_bss_add(dev, mgmt->bssid, channel, 2560 return;
2561
2562#ifdef CONFIG_MAC80211_MESH
2563 if (elems.mesh_config)
2564 bss = ieee80211_rx_mesh_bss_get(dev, elems.mesh_id,
2565 elems.mesh_id_len, elems.mesh_config, freq);
2566 else
2567#endif
2568 bss = ieee80211_rx_bss_get(dev, mgmt->bssid, freq,
1964 elems.ssid, elems.ssid_len); 2569 elems.ssid, elems.ssid_len);
2570 if (!bss) {
2571#ifdef CONFIG_MAC80211_MESH
2572 if (elems.mesh_config)
2573 bss = ieee80211_rx_mesh_bss_add(dev, elems.mesh_id,
2574 elems.mesh_id_len, elems.mesh_config,
2575 elems.mesh_config_len, freq);
2576 else
2577#endif
2578 bss = ieee80211_rx_bss_add(dev, mgmt->bssid, freq,
2579 elems.ssid, elems.ssid_len);
1965 if (!bss) 2580 if (!bss)
1966 return; 2581 return;
1967 } else { 2582 } else {
@@ -1973,18 +2588,29 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
1973#endif 2588#endif
1974 } 2589 }
1975 2590
1976 if (bss->probe_resp && beacon) {
1977 /* Do not allow beacon to override data from Probe Response. */
1978 ieee80211_rx_bss_put(dev, bss);
1979 return;
1980 }
1981
1982 /* save the ERP value so that it is available at association time */ 2591 /* save the ERP value so that it is available at association time */
1983 if (elems.erp_info && elems.erp_info_len >= 1) { 2592 if (elems.erp_info && elems.erp_info_len >= 1) {
1984 bss->erp_value = elems.erp_info[0]; 2593 bss->erp_value = elems.erp_info[0];
1985 bss->has_erp_value = 1; 2594 bss->has_erp_value = 1;
1986 } 2595 }
1987 2596
2597 if (elems.ht_cap_elem &&
2598 (!bss->ht_ie || bss->ht_ie_len != elems.ht_cap_elem_len ||
2599 memcmp(bss->ht_ie, elems.ht_cap_elem, elems.ht_cap_elem_len))) {
2600 kfree(bss->ht_ie);
2601 bss->ht_ie = kmalloc(elems.ht_cap_elem_len + 2, GFP_ATOMIC);
2602 if (bss->ht_ie) {
2603 memcpy(bss->ht_ie, elems.ht_cap_elem - 2,
2604 elems.ht_cap_elem_len + 2);
2605 bss->ht_ie_len = elems.ht_cap_elem_len + 2;
2606 } else
2607 bss->ht_ie_len = 0;
2608 } else if (!elems.ht_cap_elem && bss->ht_ie) {
2609 kfree(bss->ht_ie);
2610 bss->ht_ie = NULL;
2611 bss->ht_ie_len = 0;
2612 }
2613
1988 bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int); 2614 bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int);
1989 bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info); 2615 bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info);
1990 2616
@@ -2006,6 +2632,26 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2006 bss->supp_rates_len += clen; 2632 bss->supp_rates_len += clen;
2007 } 2633 }
2008 2634
2635 bss->band = rx_status->band;
2636
2637 bss->timestamp = beacon_timestamp;
2638 bss->last_update = jiffies;
2639 bss->rssi = rx_status->ssi;
2640 bss->signal = rx_status->signal;
2641 bss->noise = rx_status->noise;
2642 if (!beacon && !bss->probe_resp)
2643 bss->probe_resp = true;
2644
2645 /*
2646 * In STA mode, the remaining parameters should not be overridden
2647 * by beacons because they're not necessarily accurate there.
2648 */
2649 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
2650 bss->probe_resp && beacon) {
2651 ieee80211_rx_bss_put(dev, bss);
2652 return;
2653 }
2654
2009 if (elems.wpa && 2655 if (elems.wpa &&
2010 (!bss->wpa_ie || bss->wpa_ie_len != elems.wpa_len || 2656 (!bss->wpa_ie || bss->wpa_ie_len != elems.wpa_len ||
2011 memcmp(bss->wpa_ie, elems.wpa, elems.wpa_len))) { 2657 memcmp(bss->wpa_ie, elems.wpa, elems.wpa_len))) {
@@ -2038,6 +2684,20 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2038 bss->rsn_ie_len = 0; 2684 bss->rsn_ie_len = 0;
2039 } 2685 }
2040 2686
2687 /*
2688 * Cf.
2689 * http://www.wipo.int/pctdb/en/wo.jsp?wo=2007047181&IA=WO2007047181&DISPLAY=DESC
2690 *
2691 * quoting:
2692 *
2693 * In particular, "Wi-Fi CERTIFIED for WMM - Support for Multimedia
2694 * Applications with Quality of Service in Wi-Fi Networks," Wi- Fi
2695 * Alliance (September 1, 2004) is incorporated by reference herein.
2696 * The inclusion of the WMM Parameters in probe responses and
2697 * association responses is mandatory for WMM enabled networks. The
2698 * inclusion of the WMM Parameters in beacons, however, is optional.
2699 */
2700
2041 if (elems.wmm_param && 2701 if (elems.wmm_param &&
2042 (!bss->wmm_ie || bss->wmm_ie_len != elems.wmm_param_len || 2702 (!bss->wmm_ie || bss->wmm_ie_len != elems.wmm_param_len ||
2043 memcmp(bss->wmm_ie, elems.wmm_param, elems.wmm_param_len))) { 2703 memcmp(bss->wmm_ie, elems.wmm_param, elems.wmm_param_len))) {
@@ -2054,44 +2714,62 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2054 bss->wmm_ie = NULL; 2714 bss->wmm_ie = NULL;
2055 bss->wmm_ie_len = 0; 2715 bss->wmm_ie_len = 0;
2056 } 2716 }
2057 if (elems.ht_cap_elem && 2717
2058 (!bss->ht_ie || bss->ht_ie_len != elems.ht_cap_elem_len || 2718 /* check if we need to merge IBSS */
2059 memcmp(bss->ht_ie, elems.ht_cap_elem, elems.ht_cap_elem_len))) { 2719 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && beacon &&
2060 kfree(bss->ht_ie); 2720 !local->sta_sw_scanning && !local->sta_hw_scanning &&
2061 bss->ht_ie = kmalloc(elems.ht_cap_elem_len + 2, GFP_ATOMIC); 2721 bss->capability & WLAN_CAPABILITY_IBSS &&
2062 if (bss->ht_ie) { 2722 bss->freq == local->oper_channel->center_freq &&
2063 memcpy(bss->ht_ie, elems.ht_cap_elem - 2, 2723 elems.ssid_len == sdata->u.sta.ssid_len &&
2064 elems.ht_cap_elem_len + 2); 2724 memcmp(elems.ssid, sdata->u.sta.ssid, sdata->u.sta.ssid_len) == 0) {
2065 bss->ht_ie_len = elems.ht_cap_elem_len + 2; 2725 if (rx_status->flag & RX_FLAG_TSFT) {
2066 } else 2726 /* in order for correct IBSS merging we need mactime
2067 bss->ht_ie_len = 0; 2727 *
2068 } else if (!elems.ht_cap_elem && bss->ht_ie) { 2728 * since mactime is defined as the time the first data
2069 kfree(bss->ht_ie); 2729 * symbol of the frame hits the PHY, and the timestamp
2070 bss->ht_ie = NULL; 2730 * of the beacon is defined as "the time that the data
2071 bss->ht_ie_len = 0; 2731 * symbol containing the first bit of the timestamp is
2732 * transmitted to the PHY plus the transmitting STA’s
2733 * delays through its local PHY from the MAC-PHY
2734 * interface to its interface with the WM"
2735 * (802.11 11.1.2) - equals the time this bit arrives at
2736 * the receiver - we have to take into account the
2737 * offset between the two.
2738 * e.g: at 1 MBit that means mactime is 192 usec earlier
2739 * (=24 bytes * 8 usecs/byte) than the beacon timestamp.
2740 */
2741 int rate = local->hw.wiphy->bands[rx_status->band]->
2742 bitrates[rx_status->rate_idx].bitrate;
2743 rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate);
2744 } else if (local && local->ops && local->ops->get_tsf)
2745 /* second best option: get current TSF */
2746 rx_timestamp = local->ops->get_tsf(local_to_hw(local));
2747 else
2748 /* can't merge without knowing the TSF */
2749 rx_timestamp = -1LLU;
2750#ifdef CONFIG_MAC80211_IBSS_DEBUG
2751 printk(KERN_DEBUG "RX beacon SA=%s BSSID="
2752 "%s TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
2753 print_mac(mac, mgmt->sa),
2754 print_mac(mac2, mgmt->bssid),
2755 (unsigned long long)rx_timestamp,
2756 (unsigned long long)beacon_timestamp,
2757 (unsigned long long)(rx_timestamp - beacon_timestamp),
2758 jiffies);
2759#endif /* CONFIG_MAC80211_IBSS_DEBUG */
2760 if (beacon_timestamp > rx_timestamp) {
2761#ifndef CONFIG_MAC80211_IBSS_DEBUG
2762 if (net_ratelimit())
2763#endif
2764 printk(KERN_DEBUG "%s: beacon TSF higher than "
2765 "local TSF - IBSS merge with BSSID %s\n",
2766 dev->name, print_mac(mac, mgmt->bssid));
2767 ieee80211_sta_join_ibss(dev, &sdata->u.sta, bss);
2768 ieee80211_ibss_add_sta(dev, NULL,
2769 mgmt->bssid, mgmt->sa);
2770 }
2072 } 2771 }
2073 2772
2074 bss->hw_mode = rx_status->phymode;
2075 bss->freq = rx_status->freq;
2076 if (channel != rx_status->channel &&
2077 (bss->hw_mode == MODE_IEEE80211G ||
2078 bss->hw_mode == MODE_IEEE80211B) &&
2079 channel >= 1 && channel <= 14) {
2080 static const int freq_list[] = {
2081 2412, 2417, 2422, 2427, 2432, 2437, 2442,
2082 2447, 2452, 2457, 2462, 2467, 2472, 2484
2083 };
2084 /* IEEE 802.11g/b mode can receive packets from neighboring
2085 * channels, so map the channel into frequency. */
2086 bss->freq = freq_list[channel - 1];
2087 }
2088 bss->timestamp = timestamp;
2089 bss->last_update = jiffies;
2090 bss->rssi = rx_status->ssi;
2091 bss->signal = rx_status->signal;
2092 bss->noise = rx_status->noise;
2093 if (!beacon)
2094 bss->probe_resp++;
2095 ieee80211_rx_bss_put(dev, bss); 2773 ieee80211_rx_bss_put(dev, bss);
2096} 2774}
2097 2775
@@ -2136,6 +2814,17 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
2136 2814
2137 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); 2815 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
2138 2816
2817 if (elems.wmm_param && (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) {
2818 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param,
2819 elems.wmm_param_len);
2820 }
2821
2822 /* Do not send changes to driver if we are scanning. This removes
2823 * requirement that driver's bss_info_changed function needs to be
2824 * atomic. */
2825 if (local->sta_sw_scanning || local->sta_hw_scanning)
2826 return;
2827
2139 if (elems.erp_info && elems.erp_info_len >= 1) 2828 if (elems.erp_info && elems.erp_info_len >= 1)
2140 changed |= ieee80211_handle_erp_ie(sdata, elems.erp_info[0]); 2829 changed |= ieee80211_handle_erp_ie(sdata, elems.erp_info[0]);
2141 else { 2830 else {
@@ -2145,25 +2834,14 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
2145 } 2834 }
2146 2835
2147 if (elems.ht_cap_elem && elems.ht_info_elem && 2836 if (elems.ht_cap_elem && elems.ht_info_elem &&
2148 elems.wmm_param && local->ops->conf_ht && 2837 elems.wmm_param && conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) {
2149 conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) {
2150 struct ieee80211_ht_bss_info bss_info; 2838 struct ieee80211_ht_bss_info bss_info;
2151 2839
2152 ieee80211_ht_addt_info_ie_to_ht_bss_info( 2840 ieee80211_ht_addt_info_ie_to_ht_bss_info(
2153 (struct ieee80211_ht_addt_info *) 2841 (struct ieee80211_ht_addt_info *)
2154 elems.ht_info_elem, &bss_info); 2842 elems.ht_info_elem, &bss_info);
2155 /* check if AP changed bss inforamation */ 2843 changed |= ieee80211_handle_ht(local, 1, &conf->ht_conf,
2156 if ((conf->ht_bss_conf.primary_channel != 2844 &bss_info);
2157 bss_info.primary_channel) ||
2158 (conf->ht_bss_conf.bss_cap != bss_info.bss_cap) ||
2159 (conf->ht_bss_conf.bss_op_mode != bss_info.bss_op_mode))
2160 ieee80211_hw_config_ht(local, 1, &conf->ht_conf,
2161 &bss_info);
2162 }
2163
2164 if (elems.wmm_param && (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) {
2165 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param,
2166 elems.wmm_param_len);
2167 } 2845 }
2168 2846
2169 ieee80211_bss_info_change_notify(sdata, changed); 2847 ieee80211_bss_info_change_notify(sdata, changed);
@@ -2247,8 +2925,11 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev,
2247static void ieee80211_rx_mgmt_action(struct net_device *dev, 2925static void ieee80211_rx_mgmt_action(struct net_device *dev,
2248 struct ieee80211_if_sta *ifsta, 2926 struct ieee80211_if_sta *ifsta,
2249 struct ieee80211_mgmt *mgmt, 2927 struct ieee80211_mgmt *mgmt,
2250 size_t len) 2928 size_t len,
2929 struct ieee80211_rx_status *rx_status)
2251{ 2930{
2931 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2932
2252 if (len < IEEE80211_MIN_ACTION_SIZE) 2933 if (len < IEEE80211_MIN_ACTION_SIZE)
2253 return; 2934 return;
2254 2935
@@ -2261,6 +2942,12 @@ static void ieee80211_rx_mgmt_action(struct net_device *dev,
2261 break; 2942 break;
2262 ieee80211_sta_process_addba_request(dev, mgmt, len); 2943 ieee80211_sta_process_addba_request(dev, mgmt, len);
2263 break; 2944 break;
2945 case WLAN_ACTION_ADDBA_RESP:
2946 if (len < (IEEE80211_MIN_ACTION_SIZE +
2947 sizeof(mgmt->u.action.u.addba_resp)))
2948 break;
2949 ieee80211_sta_process_addba_resp(dev, mgmt, len);
2950 break;
2264 case WLAN_ACTION_DELBA: 2951 case WLAN_ACTION_DELBA:
2265 if (len < (IEEE80211_MIN_ACTION_SIZE + 2952 if (len < (IEEE80211_MIN_ACTION_SIZE +
2266 sizeof(mgmt->u.action.u.delba))) 2953 sizeof(mgmt->u.action.u.delba)))
@@ -2274,7 +2961,18 @@ static void ieee80211_rx_mgmt_action(struct net_device *dev,
2274 break; 2961 break;
2275 } 2962 }
2276 break; 2963 break;
2964 case PLINK_CATEGORY:
2965 if (ieee80211_vif_is_mesh(&sdata->vif))
2966 mesh_rx_plink_frame(dev, mgmt, len, rx_status);
2967 break;
2968 case MESH_PATH_SEL_CATEGORY:
2969 if (ieee80211_vif_is_mesh(&sdata->vif))
2970 mesh_rx_path_sel_frame(dev, mgmt, len);
2971 break;
2277 default: 2972 default:
2973 if (net_ratelimit())
2974 printk(KERN_DEBUG "%s: Rx unknown action frame - "
2975 "category=%d\n", dev->name, mgmt->u.action.category);
2278 break; 2976 break;
2279 } 2977 }
2280} 2978}
@@ -2301,13 +2999,13 @@ void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb,
2301 case IEEE80211_STYPE_PROBE_REQ: 2999 case IEEE80211_STYPE_PROBE_REQ:
2302 case IEEE80211_STYPE_PROBE_RESP: 3000 case IEEE80211_STYPE_PROBE_RESP:
2303 case IEEE80211_STYPE_BEACON: 3001 case IEEE80211_STYPE_BEACON:
3002 case IEEE80211_STYPE_ACTION:
2304 memcpy(skb->cb, rx_status, sizeof(*rx_status)); 3003 memcpy(skb->cb, rx_status, sizeof(*rx_status));
2305 case IEEE80211_STYPE_AUTH: 3004 case IEEE80211_STYPE_AUTH:
2306 case IEEE80211_STYPE_ASSOC_RESP: 3005 case IEEE80211_STYPE_ASSOC_RESP:
2307 case IEEE80211_STYPE_REASSOC_RESP: 3006 case IEEE80211_STYPE_REASSOC_RESP:
2308 case IEEE80211_STYPE_DEAUTH: 3007 case IEEE80211_STYPE_DEAUTH:
2309 case IEEE80211_STYPE_DISASSOC: 3008 case IEEE80211_STYPE_DISASSOC:
2310 case IEEE80211_STYPE_ACTION:
2311 skb_queue_tail(&ifsta->skb_queue, skb); 3009 skb_queue_tail(&ifsta->skb_queue, skb);
2312 queue_work(local->hw.workqueue, &ifsta->work); 3010 queue_work(local->hw.workqueue, &ifsta->work);
2313 return; 3011 return;
@@ -2366,7 +3064,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev,
2366 ieee80211_rx_mgmt_disassoc(dev, ifsta, mgmt, skb->len); 3064 ieee80211_rx_mgmt_disassoc(dev, ifsta, mgmt, skb->len);
2367 break; 3065 break;
2368 case IEEE80211_STYPE_ACTION: 3066 case IEEE80211_STYPE_ACTION:
2369 ieee80211_rx_mgmt_action(dev, ifsta, mgmt, skb->len); 3067 ieee80211_rx_mgmt_action(dev, ifsta, mgmt, skb->len, rx_status);
2370 break; 3068 break;
2371 } 3069 }
2372 3070
@@ -2374,7 +3072,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev,
2374} 3072}
2375 3073
2376 3074
2377ieee80211_txrx_result 3075ieee80211_rx_result
2378ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb, 3076ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb,
2379 struct ieee80211_rx_status *rx_status) 3077 struct ieee80211_rx_status *rx_status)
2380{ 3078{
@@ -2382,31 +3080,31 @@ ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb,
2382 u16 fc; 3080 u16 fc;
2383 3081
2384 if (skb->len < 2) 3082 if (skb->len < 2)
2385 return TXRX_DROP; 3083 return RX_DROP_UNUSABLE;
2386 3084
2387 mgmt = (struct ieee80211_mgmt *) skb->data; 3085 mgmt = (struct ieee80211_mgmt *) skb->data;
2388 fc = le16_to_cpu(mgmt->frame_control); 3086 fc = le16_to_cpu(mgmt->frame_control);
2389 3087
2390 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) 3088 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
2391 return TXRX_CONTINUE; 3089 return RX_CONTINUE;
2392 3090
2393 if (skb->len < 24) 3091 if (skb->len < 24)
2394 return TXRX_DROP; 3092 return RX_DROP_MONITOR;
2395 3093
2396 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { 3094 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
2397 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP) { 3095 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP) {
2398 ieee80211_rx_mgmt_probe_resp(dev, mgmt, 3096 ieee80211_rx_mgmt_probe_resp(dev, mgmt,
2399 skb->len, rx_status); 3097 skb->len, rx_status);
2400 dev_kfree_skb(skb); 3098 dev_kfree_skb(skb);
2401 return TXRX_QUEUED; 3099 return RX_QUEUED;
2402 } else if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) { 3100 } else if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) {
2403 ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, 3101 ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len,
2404 rx_status); 3102 rx_status);
2405 dev_kfree_skb(skb); 3103 dev_kfree_skb(skb);
2406 return TXRX_QUEUED; 3104 return RX_QUEUED;
2407 } 3105 }
2408 } 3106 }
2409 return TXRX_CONTINUE; 3107 return RX_CONTINUE;
2410} 3108}
2411 3109
2412 3110
@@ -2415,45 +3113,46 @@ static int ieee80211_sta_active_ibss(struct net_device *dev)
2415 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 3113 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2416 int active = 0; 3114 int active = 0;
2417 struct sta_info *sta; 3115 struct sta_info *sta;
3116 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3117
3118 rcu_read_lock();
2418 3119
2419 read_lock_bh(&local->sta_lock); 3120 list_for_each_entry_rcu(sta, &local->sta_list, list) {
2420 list_for_each_entry(sta, &local->sta_list, list) { 3121 if (sta->sdata == sdata &&
2421 if (sta->dev == dev &&
2422 time_after(sta->last_rx + IEEE80211_IBSS_MERGE_INTERVAL, 3122 time_after(sta->last_rx + IEEE80211_IBSS_MERGE_INTERVAL,
2423 jiffies)) { 3123 jiffies)) {
2424 active++; 3124 active++;
2425 break; 3125 break;
2426 } 3126 }
2427 } 3127 }
2428 read_unlock_bh(&local->sta_lock); 3128
3129 rcu_read_unlock();
2429 3130
2430 return active; 3131 return active;
2431} 3132}
2432 3133
2433 3134
2434static void ieee80211_sta_expire(struct net_device *dev) 3135static void ieee80211_sta_expire(struct net_device *dev, unsigned long exp_time)
2435{ 3136{
2436 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 3137 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2437 struct sta_info *sta, *tmp; 3138 struct sta_info *sta, *tmp;
2438 LIST_HEAD(tmp_list); 3139 LIST_HEAD(tmp_list);
2439 DECLARE_MAC_BUF(mac); 3140 DECLARE_MAC_BUF(mac);
3141 unsigned long flags;
2440 3142
2441 write_lock_bh(&local->sta_lock); 3143 spin_lock_irqsave(&local->sta_lock, flags);
2442 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) 3144 list_for_each_entry_safe(sta, tmp, &local->sta_list, list)
2443 if (time_after(jiffies, sta->last_rx + 3145 if (time_after(jiffies, sta->last_rx + exp_time)) {
2444 IEEE80211_IBSS_INACTIVITY_LIMIT)) {
2445 printk(KERN_DEBUG "%s: expiring inactive STA %s\n", 3146 printk(KERN_DEBUG "%s: expiring inactive STA %s\n",
2446 dev->name, print_mac(mac, sta->addr)); 3147 dev->name, print_mac(mac, sta->addr));
2447 __sta_info_get(sta); 3148 __sta_info_unlink(&sta);
2448 sta_info_remove(sta); 3149 if (sta)
2449 list_add(&sta->list, &tmp_list); 3150 list_add(&sta->list, &tmp_list);
2450 } 3151 }
2451 write_unlock_bh(&local->sta_lock); 3152 spin_unlock_irqrestore(&local->sta_lock, flags);
2452 3153
2453 list_for_each_entry_safe(sta, tmp, &tmp_list, list) { 3154 list_for_each_entry_safe(sta, tmp, &tmp_list, list)
2454 sta_info_free(sta); 3155 sta_info_destroy(sta);
2455 sta_info_put(sta);
2456 }
2457} 3156}
2458 3157
2459 3158
@@ -2462,7 +3161,7 @@ static void ieee80211_sta_merge_ibss(struct net_device *dev,
2462{ 3161{
2463 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); 3162 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL);
2464 3163
2465 ieee80211_sta_expire(dev); 3164 ieee80211_sta_expire(dev, IEEE80211_IBSS_INACTIVITY_LIMIT);
2466 if (ieee80211_sta_active_ibss(dev)) 3165 if (ieee80211_sta_active_ibss(dev))
2467 return; 3166 return;
2468 3167
@@ -2472,6 +3171,36 @@ static void ieee80211_sta_merge_ibss(struct net_device *dev,
2472} 3171}
2473 3172
2474 3173
3174#ifdef CONFIG_MAC80211_MESH
3175static void ieee80211_mesh_housekeeping(struct net_device *dev,
3176 struct ieee80211_if_sta *ifsta)
3177{
3178 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3179 bool free_plinks;
3180
3181 ieee80211_sta_expire(dev, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
3182 mesh_path_expire(dev);
3183
3184 free_plinks = mesh_plink_availables(sdata);
3185 if (free_plinks != sdata->u.sta.accepting_plinks)
3186 ieee80211_if_config_beacon(dev);
3187
3188 mod_timer(&ifsta->timer, jiffies +
3189 IEEE80211_MESH_HOUSEKEEPING_INTERVAL);
3190}
3191
3192
3193void ieee80211_start_mesh(struct net_device *dev)
3194{
3195 struct ieee80211_if_sta *ifsta;
3196 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3197 ifsta = &sdata->u.sta;
3198 ifsta->state = IEEE80211_MESH_UP;
3199 ieee80211_sta_timer((unsigned long)sdata);
3200}
3201#endif
3202
3203
2475void ieee80211_sta_timer(unsigned long data) 3204void ieee80211_sta_timer(unsigned long data)
2476{ 3205{
2477 struct ieee80211_sub_if_data *sdata = 3206 struct ieee80211_sub_if_data *sdata =
@@ -2483,7 +3212,6 @@ void ieee80211_sta_timer(unsigned long data)
2483 queue_work(local->hw.workqueue, &ifsta->work); 3212 queue_work(local->hw.workqueue, &ifsta->work);
2484} 3213}
2485 3214
2486
2487void ieee80211_sta_work(struct work_struct *work) 3215void ieee80211_sta_work(struct work_struct *work)
2488{ 3216{
2489 struct ieee80211_sub_if_data *sdata = 3217 struct ieee80211_sub_if_data *sdata =
@@ -2500,7 +3228,8 @@ void ieee80211_sta_work(struct work_struct *work)
2500 return; 3228 return;
2501 3229
2502 if (sdata->vif.type != IEEE80211_IF_TYPE_STA && 3230 if (sdata->vif.type != IEEE80211_IF_TYPE_STA &&
2503 sdata->vif.type != IEEE80211_IF_TYPE_IBSS) { 3231 sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
3232 sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) {
2504 printk(KERN_DEBUG "%s: ieee80211_sta_work: non-STA interface " 3233 printk(KERN_DEBUG "%s: ieee80211_sta_work: non-STA interface "
2505 "(type=%d)\n", dev->name, sdata->vif.type); 3234 "(type=%d)\n", dev->name, sdata->vif.type);
2506 return; 3235 return;
@@ -2510,6 +3239,13 @@ void ieee80211_sta_work(struct work_struct *work)
2510 while ((skb = skb_dequeue(&ifsta->skb_queue))) 3239 while ((skb = skb_dequeue(&ifsta->skb_queue)))
2511 ieee80211_sta_rx_queued_mgmt(dev, skb); 3240 ieee80211_sta_rx_queued_mgmt(dev, skb);
2512 3241
3242#ifdef CONFIG_MAC80211_MESH
3243 if (ifsta->preq_queue_len &&
3244 time_after(jiffies,
3245 ifsta->last_preq + msecs_to_jiffies(ifsta->mshcfg.dot11MeshHWMPpreqMinInterval)))
3246 mesh_path_start_discovery(dev);
3247#endif
3248
2513 if (ifsta->state != IEEE80211_AUTHENTICATE && 3249 if (ifsta->state != IEEE80211_AUTHENTICATE &&
2514 ifsta->state != IEEE80211_ASSOCIATE && 3250 ifsta->state != IEEE80211_ASSOCIATE &&
2515 test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) { 3251 test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) {
@@ -2545,6 +3281,11 @@ void ieee80211_sta_work(struct work_struct *work)
2545 case IEEE80211_IBSS_JOINED: 3281 case IEEE80211_IBSS_JOINED:
2546 ieee80211_sta_merge_ibss(dev, ifsta); 3282 ieee80211_sta_merge_ibss(dev, ifsta);
2547 break; 3283 break;
3284#ifdef CONFIG_MAC80211_MESH
3285 case IEEE80211_MESH_UP:
3286 ieee80211_mesh_housekeeping(dev, ifsta);
3287 break;
3288#endif
2548 default: 3289 default:
2549 printk(KERN_DEBUG "ieee80211_sta_work: Unknown state %d\n", 3290 printk(KERN_DEBUG "ieee80211_sta_work: Unknown state %d\n",
2550 ifsta->state); 3291 ifsta->state);
@@ -2655,7 +3396,7 @@ static int ieee80211_sta_config_auth(struct net_device *dev,
2655 } 3396 }
2656 3397
2657 spin_lock_bh(&local->sta_bss_lock); 3398 spin_lock_bh(&local->sta_bss_lock);
2658 freq = local->oper_channel->freq; 3399 freq = local->oper_channel->center_freq;
2659 list_for_each_entry(bss, &local->sta_bss_list, list) { 3400 list_for_each_entry(bss, &local->sta_bss_list, list) {
2660 if (!(bss->capability & WLAN_CAPABILITY_ESS)) 3401 if (!(bss->capability & WLAN_CAPABILITY_ESS))
2661 continue; 3402 continue;
@@ -2686,11 +3427,12 @@ static int ieee80211_sta_config_auth(struct net_device *dev,
2686 spin_unlock_bh(&local->sta_bss_lock); 3427 spin_unlock_bh(&local->sta_bss_lock);
2687 3428
2688 if (selected) { 3429 if (selected) {
2689 ieee80211_set_channel(local, -1, selected->freq); 3430 ieee80211_set_freq(local, selected->freq);
2690 if (!(ifsta->flags & IEEE80211_STA_SSID_SET)) 3431 if (!(ifsta->flags & IEEE80211_STA_SSID_SET))
2691 ieee80211_sta_set_ssid(dev, selected->ssid, 3432 ieee80211_sta_set_ssid(dev, selected->ssid,
2692 selected->ssid_len); 3433 selected->ssid_len);
2693 ieee80211_sta_set_bssid(dev, selected->bssid); 3434 ieee80211_sta_set_bssid(dev, selected->bssid);
3435 ieee80211_sta_def_wmm_params(dev, selected, 0);
2694 ieee80211_rx_bss_put(dev, selected); 3436 ieee80211_rx_bss_put(dev, selected);
2695 ifsta->state = IEEE80211_AUTHENTICATE; 3437 ifsta->state = IEEE80211_AUTHENTICATE;
2696 ieee80211_sta_reset_auth(dev, ifsta); 3438 ieee80211_sta_reset_auth(dev, ifsta);
@@ -2710,162 +3452,6 @@ static int ieee80211_sta_config_auth(struct net_device *dev,
2710 return -1; 3452 return -1;
2711} 3453}
2712 3454
2713static int ieee80211_sta_join_ibss(struct net_device *dev,
2714 struct ieee80211_if_sta *ifsta,
2715 struct ieee80211_sta_bss *bss)
2716{
2717 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2718 int res, rates, i, j;
2719 struct sk_buff *skb;
2720 struct ieee80211_mgmt *mgmt;
2721 struct ieee80211_tx_control control;
2722 struct ieee80211_hw_mode *mode;
2723 struct rate_selection ratesel;
2724 u8 *pos;
2725 struct ieee80211_sub_if_data *sdata;
2726
2727 /* Remove possible STA entries from other IBSS networks. */
2728 sta_info_flush(local, NULL);
2729
2730 if (local->ops->reset_tsf) {
2731 /* Reset own TSF to allow time synchronization work. */
2732 local->ops->reset_tsf(local_to_hw(local));
2733 }
2734 memcpy(ifsta->bssid, bss->bssid, ETH_ALEN);
2735 res = ieee80211_if_config(dev);
2736 if (res)
2737 return res;
2738
2739 local->hw.conf.beacon_int = bss->beacon_int >= 10 ? bss->beacon_int : 10;
2740
2741 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2742 sdata->drop_unencrypted = bss->capability &
2743 WLAN_CAPABILITY_PRIVACY ? 1 : 0;
2744
2745 res = ieee80211_set_channel(local, -1, bss->freq);
2746
2747 if (!(local->oper_channel->flag & IEEE80211_CHAN_W_IBSS)) {
2748 printk(KERN_DEBUG "%s: IBSS not allowed on channel %d "
2749 "(%d MHz)\n", dev->name, local->hw.conf.channel,
2750 local->hw.conf.freq);
2751 return -1;
2752 }
2753
2754 /* Set beacon template based on scan results */
2755 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
2756 do {
2757 if (!skb)
2758 break;
2759
2760 skb_reserve(skb, local->hw.extra_tx_headroom);
2761
2762 mgmt = (struct ieee80211_mgmt *)
2763 skb_put(skb, 24 + sizeof(mgmt->u.beacon));
2764 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
2765 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
2766 IEEE80211_STYPE_BEACON);
2767 memset(mgmt->da, 0xff, ETH_ALEN);
2768 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
2769 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
2770 mgmt->u.beacon.beacon_int =
2771 cpu_to_le16(local->hw.conf.beacon_int);
2772 mgmt->u.beacon.capab_info = cpu_to_le16(bss->capability);
2773
2774 pos = skb_put(skb, 2 + ifsta->ssid_len);
2775 *pos++ = WLAN_EID_SSID;
2776 *pos++ = ifsta->ssid_len;
2777 memcpy(pos, ifsta->ssid, ifsta->ssid_len);
2778
2779 rates = bss->supp_rates_len;
2780 if (rates > 8)
2781 rates = 8;
2782 pos = skb_put(skb, 2 + rates);
2783 *pos++ = WLAN_EID_SUPP_RATES;
2784 *pos++ = rates;
2785 memcpy(pos, bss->supp_rates, rates);
2786
2787 pos = skb_put(skb, 2 + 1);
2788 *pos++ = WLAN_EID_DS_PARAMS;
2789 *pos++ = 1;
2790 *pos++ = bss->channel;
2791
2792 pos = skb_put(skb, 2 + 2);
2793 *pos++ = WLAN_EID_IBSS_PARAMS;
2794 *pos++ = 2;
2795 /* FIX: set ATIM window based on scan results */
2796 *pos++ = 0;
2797 *pos++ = 0;
2798
2799 if (bss->supp_rates_len > 8) {
2800 rates = bss->supp_rates_len - 8;
2801 pos = skb_put(skb, 2 + rates);
2802 *pos++ = WLAN_EID_EXT_SUPP_RATES;
2803 *pos++ = rates;
2804 memcpy(pos, &bss->supp_rates[8], rates);
2805 }
2806
2807 memset(&control, 0, sizeof(control));
2808 rate_control_get_rate(dev, local->oper_hw_mode, skb, &ratesel);
2809 if (!ratesel.rate) {
2810 printk(KERN_DEBUG "%s: Failed to determine TX rate "
2811 "for IBSS beacon\n", dev->name);
2812 break;
2813 }
2814 control.vif = &sdata->vif;
2815 control.tx_rate =
2816 (sdata->bss_conf.use_short_preamble &&
2817 (ratesel.rate->flags & IEEE80211_RATE_PREAMBLE2)) ?
2818 ratesel.rate->val2 : ratesel.rate->val;
2819 control.antenna_sel_tx = local->hw.conf.antenna_sel_tx;
2820 control.power_level = local->hw.conf.power_level;
2821 control.flags |= IEEE80211_TXCTL_NO_ACK;
2822 control.retry_limit = 1;
2823
2824 ifsta->probe_resp = skb_copy(skb, GFP_ATOMIC);
2825 if (ifsta->probe_resp) {
2826 mgmt = (struct ieee80211_mgmt *)
2827 ifsta->probe_resp->data;
2828 mgmt->frame_control =
2829 IEEE80211_FC(IEEE80211_FTYPE_MGMT,
2830 IEEE80211_STYPE_PROBE_RESP);
2831 } else {
2832 printk(KERN_DEBUG "%s: Could not allocate ProbeResp "
2833 "template for IBSS\n", dev->name);
2834 }
2835
2836 if (local->ops->beacon_update &&
2837 local->ops->beacon_update(local_to_hw(local),
2838 skb, &control) == 0) {
2839 printk(KERN_DEBUG "%s: Configured IBSS beacon "
2840 "template based on scan results\n", dev->name);
2841 skb = NULL;
2842 }
2843
2844 rates = 0;
2845 mode = local->oper_hw_mode;
2846 for (i = 0; i < bss->supp_rates_len; i++) {
2847 int bitrate = (bss->supp_rates[i] & 0x7f) * 5;
2848 for (j = 0; j < mode->num_rates; j++)
2849 if (mode->rates[j].rate == bitrate)
2850 rates |= BIT(j);
2851 }
2852 ifsta->supp_rates_bits = rates;
2853 } while (0);
2854
2855 if (skb) {
2856 printk(KERN_DEBUG "%s: Failed to configure IBSS beacon "
2857 "template\n", dev->name);
2858 dev_kfree_skb(skb);
2859 }
2860
2861 ifsta->state = IEEE80211_IBSS_JOINED;
2862 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL);
2863
2864 ieee80211_rx_bss_put(dev, bss);
2865
2866 return res;
2867}
2868
2869 3455
2870static int ieee80211_sta_create_ibss(struct net_device *dev, 3456static int ieee80211_sta_create_ibss(struct net_device *dev,
2871 struct ieee80211_if_sta *ifsta) 3457 struct ieee80211_if_sta *ifsta)
@@ -2873,7 +3459,7 @@ static int ieee80211_sta_create_ibss(struct net_device *dev,
2873 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 3459 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2874 struct ieee80211_sta_bss *bss; 3460 struct ieee80211_sta_bss *bss;
2875 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 3461 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2876 struct ieee80211_hw_mode *mode; 3462 struct ieee80211_supported_band *sband;
2877 u8 bssid[ETH_ALEN], *pos; 3463 u8 bssid[ETH_ALEN], *pos;
2878 int i; 3464 int i;
2879 DECLARE_MAC_BUF(mac); 3465 DECLARE_MAC_BUF(mac);
@@ -2895,28 +3481,28 @@ static int ieee80211_sta_create_ibss(struct net_device *dev,
2895 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %s\n", 3481 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %s\n",
2896 dev->name, print_mac(mac, bssid)); 3482 dev->name, print_mac(mac, bssid));
2897 3483
2898 bss = ieee80211_rx_bss_add(dev, bssid, local->hw.conf.channel, 3484 bss = ieee80211_rx_bss_add(dev, bssid,
3485 local->hw.conf.channel->center_freq,
2899 sdata->u.sta.ssid, sdata->u.sta.ssid_len); 3486 sdata->u.sta.ssid, sdata->u.sta.ssid_len);
2900 if (!bss) 3487 if (!bss)
2901 return -ENOMEM; 3488 return -ENOMEM;
2902 3489
2903 mode = local->oper_hw_mode; 3490 bss->band = local->hw.conf.channel->band;
3491 sband = local->hw.wiphy->bands[bss->band];
2904 3492
2905 if (local->hw.conf.beacon_int == 0) 3493 if (local->hw.conf.beacon_int == 0)
2906 local->hw.conf.beacon_int = 100; 3494 local->hw.conf.beacon_int = 10000;
2907 bss->beacon_int = local->hw.conf.beacon_int; 3495 bss->beacon_int = local->hw.conf.beacon_int;
2908 bss->hw_mode = local->hw.conf.phymode;
2909 bss->freq = local->hw.conf.freq;
2910 bss->last_update = jiffies; 3496 bss->last_update = jiffies;
2911 bss->capability = WLAN_CAPABILITY_IBSS; 3497 bss->capability = WLAN_CAPABILITY_IBSS;
2912 if (sdata->default_key) { 3498 if (sdata->default_key) {
2913 bss->capability |= WLAN_CAPABILITY_PRIVACY; 3499 bss->capability |= WLAN_CAPABILITY_PRIVACY;
2914 } else 3500 } else
2915 sdata->drop_unencrypted = 0; 3501 sdata->drop_unencrypted = 0;
2916 bss->supp_rates_len = mode->num_rates; 3502 bss->supp_rates_len = sband->n_bitrates;
2917 pos = bss->supp_rates; 3503 pos = bss->supp_rates;
2918 for (i = 0; i < mode->num_rates; i++) { 3504 for (i = 0; i < sband->n_bitrates; i++) {
2919 int rate = mode->rates[i].rate; 3505 int rate = sband->bitrates[i].bitrate;
2920 *pos++ = (u8) (rate / 5); 3506 *pos++ = (u8) (rate / 5);
2921 } 3507 }
2922 3508
@@ -2965,7 +3551,8 @@ static int ieee80211_sta_find_ibss(struct net_device *dev,
2965 "%s\n", print_mac(mac, bssid), print_mac(mac2, ifsta->bssid)); 3551 "%s\n", print_mac(mac, bssid), print_mac(mac2, ifsta->bssid));
2966#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 3552#endif /* CONFIG_MAC80211_IBSS_DEBUG */
2967 if (found && memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0 && 3553 if (found && memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0 &&
2968 (bss = ieee80211_rx_bss_get(dev, bssid, local->hw.conf.channel, 3554 (bss = ieee80211_rx_bss_get(dev, bssid,
3555 local->hw.conf.channel->center_freq,
2969 ifsta->ssid, ifsta->ssid_len))) { 3556 ifsta->ssid, ifsta->ssid_len))) {
2970 printk(KERN_DEBUG "%s: Selected IBSS BSSID %s" 3557 printk(KERN_DEBUG "%s: Selected IBSS BSSID %s"
2971 " based on configured SSID\n", 3558 " based on configured SSID\n",
@@ -2993,13 +3580,13 @@ static int ieee80211_sta_find_ibss(struct net_device *dev,
2993 if (time_after(jiffies, ifsta->ibss_join_req + 3580 if (time_after(jiffies, ifsta->ibss_join_req +
2994 IEEE80211_IBSS_JOIN_TIMEOUT)) { 3581 IEEE80211_IBSS_JOIN_TIMEOUT)) {
2995 if ((ifsta->flags & IEEE80211_STA_CREATE_IBSS) && 3582 if ((ifsta->flags & IEEE80211_STA_CREATE_IBSS) &&
2996 local->oper_channel->flag & IEEE80211_CHAN_W_IBSS) 3583 (!(local->oper_channel->flags &
3584 IEEE80211_CHAN_NO_IBSS)))
2997 return ieee80211_sta_create_ibss(dev, ifsta); 3585 return ieee80211_sta_create_ibss(dev, ifsta);
2998 if (ifsta->flags & IEEE80211_STA_CREATE_IBSS) { 3586 if (ifsta->flags & IEEE80211_STA_CREATE_IBSS) {
2999 printk(KERN_DEBUG "%s: IBSS not allowed on the" 3587 printk(KERN_DEBUG "%s: IBSS not allowed on"
3000 " configured channel %d (%d MHz)\n", 3588 " %d MHz\n", dev->name,
3001 dev->name, local->hw.conf.channel, 3589 local->hw.conf.channel->center_freq);
3002 local->hw.conf.freq);
3003 } 3590 }
3004 3591
3005 /* No IBSS found - decrease scan interval and continue 3592 /* No IBSS found - decrease scan interval and continue
@@ -3018,41 +3605,12 @@ static int ieee80211_sta_find_ibss(struct net_device *dev,
3018 3605
3019int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len) 3606int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len)
3020{ 3607{
3021 struct ieee80211_sub_if_data *sdata; 3608 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3022 struct ieee80211_if_sta *ifsta; 3609 struct ieee80211_if_sta *ifsta;
3023 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
3024 3610
3025 if (len > IEEE80211_MAX_SSID_LEN) 3611 if (len > IEEE80211_MAX_SSID_LEN)
3026 return -EINVAL; 3612 return -EINVAL;
3027 3613
3028 /* TODO: This should always be done for IBSS, even if IEEE80211_QOS is
3029 * not defined. */
3030 if (local->ops->conf_tx) {
3031 struct ieee80211_tx_queue_params qparam;
3032 int i;
3033
3034 memset(&qparam, 0, sizeof(qparam));
3035 /* TODO: are these ok defaults for all hw_modes? */
3036 qparam.aifs = 2;
3037 qparam.cw_min =
3038 local->hw.conf.phymode == MODE_IEEE80211B ? 31 : 15;
3039 qparam.cw_max = 1023;
3040 qparam.burst_time = 0;
3041 for (i = IEEE80211_TX_QUEUE_DATA0; i < NUM_TX_DATA_QUEUES; i++)
3042 {
3043 local->ops->conf_tx(local_to_hw(local),
3044 i + IEEE80211_TX_QUEUE_DATA0,
3045 &qparam);
3046 }
3047 /* IBSS uses different parameters for Beacon sending */
3048 qparam.cw_min++;
3049 qparam.cw_min *= 2;
3050 qparam.cw_min--;
3051 local->ops->conf_tx(local_to_hw(local),
3052 IEEE80211_TX_QUEUE_BEACON, &qparam);
3053 }
3054
3055 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3056 ifsta = &sdata->u.sta; 3614 ifsta = &sdata->u.sta;
3057 3615
3058 if (ifsta->ssid_len != len || memcmp(ifsta->ssid, ssid, len) != 0) 3616 if (ifsta->ssid_len != len || memcmp(ifsta->ssid, ssid, len) != 0)
@@ -3144,6 +3702,13 @@ static void ieee80211_send_nullfunc(struct ieee80211_local *local,
3144} 3702}
3145 3703
3146 3704
3705static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
3706{
3707 if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
3708 ieee80211_vif_is_mesh(&sdata->vif))
3709 ieee80211_sta_timer((unsigned long)sdata);
3710}
3711
3147void ieee80211_scan_completed(struct ieee80211_hw *hw) 3712void ieee80211_scan_completed(struct ieee80211_hw *hw)
3148{ 3713{
3149 struct ieee80211_local *local = hw_to_local(hw); 3714 struct ieee80211_local *local = hw_to_local(hw);
@@ -3157,6 +3722,15 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw)
3157 3722
3158 if (local->sta_hw_scanning) { 3723 if (local->sta_hw_scanning) {
3159 local->sta_hw_scanning = 0; 3724 local->sta_hw_scanning = 0;
3725 if (ieee80211_hw_config(local))
3726 printk(KERN_DEBUG "%s: failed to restore operational "
3727 "channel after scan\n", dev->name);
3728 /* Restart STA timer for HW scan case */
3729 rcu_read_lock();
3730 list_for_each_entry_rcu(sdata, &local->interfaces, list)
3731 ieee80211_restart_sta_timer(sdata);
3732 rcu_read_unlock();
3733
3160 goto done; 3734 goto done;
3161 } 3735 }
3162 3736
@@ -3183,11 +3757,12 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw)
3183 if (sdata->dev == local->mdev) 3757 if (sdata->dev == local->mdev)
3184 continue; 3758 continue;
3185 3759
3186 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { 3760 /* Tell AP we're back */
3187 if (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) 3761 if (sdata->vif.type == IEEE80211_IF_TYPE_STA &&
3188 ieee80211_send_nullfunc(local, sdata, 0); 3762 sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED)
3189 ieee80211_sta_timer((unsigned long)sdata); 3763 ieee80211_send_nullfunc(local, sdata, 0);
3190 } 3764
3765 ieee80211_restart_sta_timer(sdata);
3191 3766
3192 netif_wake_queue(sdata->dev); 3767 netif_wake_queue(sdata->dev);
3193 } 3768 }
@@ -3211,7 +3786,7 @@ void ieee80211_sta_scan_work(struct work_struct *work)
3211 container_of(work, struct ieee80211_local, scan_work.work); 3786 container_of(work, struct ieee80211_local, scan_work.work);
3212 struct net_device *dev = local->scan_dev; 3787 struct net_device *dev = local->scan_dev;
3213 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 3788 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3214 struct ieee80211_hw_mode *mode; 3789 struct ieee80211_supported_band *sband;
3215 struct ieee80211_channel *chan; 3790 struct ieee80211_channel *chan;
3216 int skip; 3791 int skip;
3217 unsigned long next_delay = 0; 3792 unsigned long next_delay = 0;
@@ -3221,44 +3796,59 @@ void ieee80211_sta_scan_work(struct work_struct *work)
3221 3796
3222 switch (local->scan_state) { 3797 switch (local->scan_state) {
3223 case SCAN_SET_CHANNEL: 3798 case SCAN_SET_CHANNEL:
3224 mode = local->scan_hw_mode; 3799 /*
3225 if (local->scan_hw_mode->list.next == &local->modes_list && 3800 * Get current scan band. scan_band may be IEEE80211_NUM_BANDS
3226 local->scan_channel_idx >= mode->num_channels) { 3801 * after we successfully scanned the last channel of the last
3802 * band (and the last band is supported by the hw)
3803 */
3804 if (local->scan_band < IEEE80211_NUM_BANDS)
3805 sband = local->hw.wiphy->bands[local->scan_band];
3806 else
3807 sband = NULL;
3808
3809 /*
3810 * If we are at an unsupported band and have more bands
3811 * left to scan, advance to the next supported one.
3812 */
3813 while (!sband && local->scan_band < IEEE80211_NUM_BANDS - 1) {
3814 local->scan_band++;
3815 sband = local->hw.wiphy->bands[local->scan_band];
3816 local->scan_channel_idx = 0;
3817 }
3818
3819 /* if no more bands/channels left, complete scan */
3820 if (!sband || local->scan_channel_idx >= sband->n_channels) {
3227 ieee80211_scan_completed(local_to_hw(local)); 3821 ieee80211_scan_completed(local_to_hw(local));
3228 return; 3822 return;
3229 } 3823 }
3230 skip = !(local->enabled_modes & (1 << mode->mode)); 3824 skip = 0;
3231 chan = &mode->channels[local->scan_channel_idx]; 3825 chan = &sband->channels[local->scan_channel_idx];
3232 if (!(chan->flag & IEEE80211_CHAN_W_SCAN) || 3826
3827 if (chan->flags & IEEE80211_CHAN_DISABLED ||
3233 (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && 3828 (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
3234 !(chan->flag & IEEE80211_CHAN_W_IBSS)) || 3829 chan->flags & IEEE80211_CHAN_NO_IBSS))
3235 (local->hw_modes & local->enabled_modes &
3236 (1 << MODE_IEEE80211G) && mode->mode == MODE_IEEE80211B))
3237 skip = 1; 3830 skip = 1;
3238 3831
3239 if (!skip) { 3832 if (!skip) {
3240#if 0
3241 printk(KERN_DEBUG "%s: scan channel %d (%d MHz)\n",
3242 dev->name, chan->chan, chan->freq);
3243#endif
3244
3245 local->scan_channel = chan; 3833 local->scan_channel = chan;
3246 if (ieee80211_hw_config(local)) { 3834 if (ieee80211_hw_config(local)) {
3247 printk(KERN_DEBUG "%s: failed to set channel " 3835 printk(KERN_DEBUG "%s: failed to set freq to "
3248 "%d (%d MHz) for scan\n", dev->name, 3836 "%d MHz for scan\n", dev->name,
3249 chan->chan, chan->freq); 3837 chan->center_freq);
3250 skip = 1; 3838 skip = 1;
3251 } 3839 }
3252 } 3840 }
3253 3841
3842 /* advance state machine to next channel/band */
3254 local->scan_channel_idx++; 3843 local->scan_channel_idx++;
3255 if (local->scan_channel_idx >= local->scan_hw_mode->num_channels) { 3844 if (local->scan_channel_idx >= sband->n_channels) {
3256 if (local->scan_hw_mode->list.next != &local->modes_list) { 3845 /*
3257 local->scan_hw_mode = list_entry(local->scan_hw_mode->list.next, 3846 * scan_band may end up == IEEE80211_NUM_BANDS, but
3258 struct ieee80211_hw_mode, 3847 * we'll catch that case above and complete the scan
3259 list); 3848 * if that is the case.
3260 local->scan_channel_idx = 0; 3849 */
3261 } 3850 local->scan_band++;
3851 local->scan_channel_idx = 0;
3262 } 3852 }
3263 3853
3264 if (skip) 3854 if (skip)
@@ -3269,13 +3859,14 @@ void ieee80211_sta_scan_work(struct work_struct *work)
3269 local->scan_state = SCAN_SEND_PROBE; 3859 local->scan_state = SCAN_SEND_PROBE;
3270 break; 3860 break;
3271 case SCAN_SEND_PROBE: 3861 case SCAN_SEND_PROBE:
3272 if (local->scan_channel->flag & IEEE80211_CHAN_W_ACTIVE_SCAN) { 3862 next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
3273 ieee80211_send_probe_req(dev, NULL, local->scan_ssid,
3274 local->scan_ssid_len);
3275 next_delay = IEEE80211_CHANNEL_TIME;
3276 } else
3277 next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
3278 local->scan_state = SCAN_SET_CHANNEL; 3863 local->scan_state = SCAN_SET_CHANNEL;
3864
3865 if (local->scan_channel->flags & IEEE80211_CHAN_PASSIVE_SCAN)
3866 break;
3867 ieee80211_send_probe_req(dev, NULL, local->scan_ssid,
3868 local->scan_ssid_len);
3869 next_delay = IEEE80211_CHANNEL_TIME;
3279 break; 3870 break;
3280 } 3871 }
3281 3872
@@ -3350,10 +3941,8 @@ static int ieee80211_sta_start_scan(struct net_device *dev,
3350 } else 3941 } else
3351 local->scan_ssid_len = 0; 3942 local->scan_ssid_len = 0;
3352 local->scan_state = SCAN_SET_CHANNEL; 3943 local->scan_state = SCAN_SET_CHANNEL;
3353 local->scan_hw_mode = list_entry(local->modes_list.next,
3354 struct ieee80211_hw_mode,
3355 list);
3356 local->scan_channel_idx = 0; 3944 local->scan_channel_idx = 0;
3945 local->scan_band = IEEE80211_BAND_2GHZ;
3357 local->scan_dev = dev; 3946 local->scan_dev = dev;
3358 3947
3359 netif_tx_lock_bh(local->mdev); 3948 netif_tx_lock_bh(local->mdev);
@@ -3408,9 +3997,6 @@ ieee80211_sta_scan_result(struct net_device *dev,
3408 bss->last_update + IEEE80211_SCAN_RESULT_EXPIRE)) 3997 bss->last_update + IEEE80211_SCAN_RESULT_EXPIRE))
3409 return current_ev; 3998 return current_ev;
3410 3999
3411 if (!(local->enabled_modes & (1 << bss->hw_mode)))
3412 return current_ev;
3413
3414 memset(&iwe, 0, sizeof(iwe)); 4000 memset(&iwe, 0, sizeof(iwe));
3415 iwe.cmd = SIOCGIWAP; 4001 iwe.cmd = SIOCGIWAP;
3416 iwe.u.ap_addr.sa_family = ARPHRD_ETHER; 4002 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
@@ -3420,15 +4006,25 @@ ieee80211_sta_scan_result(struct net_device *dev,
3420 4006
3421 memset(&iwe, 0, sizeof(iwe)); 4007 memset(&iwe, 0, sizeof(iwe));
3422 iwe.cmd = SIOCGIWESSID; 4008 iwe.cmd = SIOCGIWESSID;
3423 iwe.u.data.length = bss->ssid_len; 4009 if (bss_mesh_cfg(bss)) {
3424 iwe.u.data.flags = 1; 4010 iwe.u.data.length = bss_mesh_id_len(bss);
3425 current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, 4011 iwe.u.data.flags = 1;
3426 bss->ssid); 4012 current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe,
4013 bss_mesh_id(bss));
4014 } else {
4015 iwe.u.data.length = bss->ssid_len;
4016 iwe.u.data.flags = 1;
4017 current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe,
4018 bss->ssid);
4019 }
3427 4020
3428 if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) { 4021 if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)
4022 || bss_mesh_cfg(bss)) {
3429 memset(&iwe, 0, sizeof(iwe)); 4023 memset(&iwe, 0, sizeof(iwe));
3430 iwe.cmd = SIOCGIWMODE; 4024 iwe.cmd = SIOCGIWMODE;
3431 if (bss->capability & WLAN_CAPABILITY_ESS) 4025 if (bss_mesh_cfg(bss))
4026 iwe.u.mode = IW_MODE_MESH;
4027 else if (bss->capability & WLAN_CAPABILITY_ESS)
3432 iwe.u.mode = IW_MODE_MASTER; 4028 iwe.u.mode = IW_MODE_MASTER;
3433 else 4029 else
3434 iwe.u.mode = IW_MODE_ADHOC; 4030 iwe.u.mode = IW_MODE_ADHOC;
@@ -3438,12 +4034,15 @@ ieee80211_sta_scan_result(struct net_device *dev,
3438 4034
3439 memset(&iwe, 0, sizeof(iwe)); 4035 memset(&iwe, 0, sizeof(iwe));
3440 iwe.cmd = SIOCGIWFREQ; 4036 iwe.cmd = SIOCGIWFREQ;
3441 iwe.u.freq.m = bss->channel; 4037 iwe.u.freq.m = bss->freq;
3442 iwe.u.freq.e = 0; 4038 iwe.u.freq.e = 6;
3443 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, 4039 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
3444 IW_EV_FREQ_LEN); 4040 IW_EV_FREQ_LEN);
3445 iwe.u.freq.m = bss->freq * 100000; 4041
3446 iwe.u.freq.e = 1; 4042 memset(&iwe, 0, sizeof(iwe));
4043 iwe.cmd = SIOCGIWFREQ;
4044 iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq);
4045 iwe.u.freq.e = 0;
3447 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, 4046 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
3448 IW_EV_FREQ_LEN); 4047 IW_EV_FREQ_LEN);
3449 4048
@@ -3514,6 +4113,45 @@ ieee80211_sta_scan_result(struct net_device *dev,
3514 } 4113 }
3515 } 4114 }
3516 4115
4116 if (bss_mesh_cfg(bss)) {
4117 char *buf;
4118 u8 *cfg = bss_mesh_cfg(bss);
4119 buf = kmalloc(50, GFP_ATOMIC);
4120 if (buf) {
4121 memset(&iwe, 0, sizeof(iwe));
4122 iwe.cmd = IWEVCUSTOM;
4123 sprintf(buf, "Mesh network (version %d)", cfg[0]);
4124 iwe.u.data.length = strlen(buf);
4125 current_ev = iwe_stream_add_point(current_ev, end_buf,
4126 &iwe, buf);
4127 sprintf(buf, "Path Selection Protocol ID: "
4128 "0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3],
4129 cfg[4]);
4130 iwe.u.data.length = strlen(buf);
4131 current_ev = iwe_stream_add_point(current_ev, end_buf,
4132 &iwe, buf);
4133 sprintf(buf, "Path Selection Metric ID: "
4134 "0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7],
4135 cfg[8]);
4136 iwe.u.data.length = strlen(buf);
4137 current_ev = iwe_stream_add_point(current_ev, end_buf,
4138 &iwe, buf);
4139 sprintf(buf, "Congestion Control Mode ID: "
4140 "0x%02X%02X%02X%02X", cfg[9], cfg[10],
4141 cfg[11], cfg[12]);
4142 iwe.u.data.length = strlen(buf);
4143 current_ev = iwe_stream_add_point(current_ev, end_buf,
4144 &iwe, buf);
4145 sprintf(buf, "Channel Precedence: "
4146 "0x%02X%02X%02X%02X", cfg[13], cfg[14],
4147 cfg[15], cfg[16]);
4148 iwe.u.data.length = strlen(buf);
4149 current_ev = iwe_stream_add_point(current_ev, end_buf,
4150 &iwe, buf);
4151 kfree(buf);
4152 }
4153 }
4154
3517 return current_ev; 4155 return current_ev;
3518} 4156}
3519 4157
@@ -3582,15 +4220,21 @@ struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev,
3582 printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n", 4220 printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n",
3583 wiphy_name(local->hw.wiphy), print_mac(mac, addr), dev->name); 4221 wiphy_name(local->hw.wiphy), print_mac(mac, addr), dev->name);
3584 4222
3585 sta = sta_info_add(local, dev, addr, GFP_ATOMIC); 4223 sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
3586 if (!sta) 4224 if (!sta)
3587 return NULL; 4225 return NULL;
3588 4226
3589 sta->supp_rates = sdata->u.sta.supp_rates_bits; 4227 sta->flags |= WLAN_STA_AUTHORIZED;
4228
4229 sta->supp_rates[local->hw.conf.channel->band] =
4230 sdata->u.sta.supp_rates_bits[local->hw.conf.channel->band];
3590 4231
3591 rate_control_rate_init(sta, local); 4232 rate_control_rate_init(sta, local);
3592 4233
3593 return sta; /* caller will call sta_info_put() */ 4234 if (sta_info_insert(sta))
4235 return NULL;
4236
4237 return sta;
3594} 4238}
3595 4239
3596 4240
@@ -3630,3 +4274,26 @@ int ieee80211_sta_disassociate(struct net_device *dev, u16 reason)
3630 ieee80211_set_disassoc(dev, ifsta, 0); 4274 ieee80211_set_disassoc(dev, ifsta, 0);
3631 return 0; 4275 return 0;
3632} 4276}
4277
4278void ieee80211_notify_mac(struct ieee80211_hw *hw,
4279 enum ieee80211_notification_types notif_type)
4280{
4281 struct ieee80211_local *local = hw_to_local(hw);
4282 struct ieee80211_sub_if_data *sdata;
4283
4284 switch (notif_type) {
4285 case IEEE80211_NOTIFY_RE_ASSOC:
4286 rcu_read_lock();
4287 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
4288
4289 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) {
4290 ieee80211_sta_req_auth(sdata->dev,
4291 &sdata->u.sta);
4292 }
4293
4294 }
4295 rcu_read_unlock();
4296 break;
4297 }
4298}
4299EXPORT_SYMBOL(ieee80211_notify_mac);
diff --git a/net/mac80211/ieee80211_rate.c b/net/mac80211/rate.c
index b957e67c5fba..841df93807fc 100644
--- a/net/mac80211/ieee80211_rate.c
+++ b/net/mac80211/rate.c
@@ -10,7 +10,7 @@
10 10
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/rtnetlink.h> 12#include <linux/rtnetlink.h>
13#include "ieee80211_rate.h" 13#include "rate.h"
14#include "ieee80211_i.h" 14#include "ieee80211_i.h"
15 15
16struct rate_control_alg { 16struct rate_control_alg {
@@ -163,34 +163,37 @@ static void rate_control_release(struct kref *kref)
163} 163}
164 164
165void rate_control_get_rate(struct net_device *dev, 165void rate_control_get_rate(struct net_device *dev,
166 struct ieee80211_hw_mode *mode, struct sk_buff *skb, 166 struct ieee80211_supported_band *sband,
167 struct sk_buff *skb,
167 struct rate_selection *sel) 168 struct rate_selection *sel)
168{ 169{
169 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 170 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
170 struct rate_control_ref *ref = local->rate_ctrl; 171 struct rate_control_ref *ref = local->rate_ctrl;
171 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 172 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
172 struct sta_info *sta = sta_info_get(local, hdr->addr1); 173 struct sta_info *sta;
173 int i; 174 int i;
174 175
176 rcu_read_lock();
177 sta = sta_info_get(local, hdr->addr1);
178
175 memset(sel, 0, sizeof(struct rate_selection)); 179 memset(sel, 0, sizeof(struct rate_selection));
176 180
177 ref->ops->get_rate(ref->priv, dev, mode, skb, sel); 181 ref->ops->get_rate(ref->priv, dev, sband, skb, sel);
178 182
179 /* Select a non-ERP backup rate. */ 183 /* Select a non-ERP backup rate. */
180 if (!sel->nonerp) { 184 if (!sel->nonerp) {
181 for (i = 0; i < mode->num_rates - 1; i++) { 185 for (i = 0; i < sband->n_bitrates; i++) {
182 struct ieee80211_rate *rate = &mode->rates[i]; 186 struct ieee80211_rate *rate = &sband->bitrates[i];
183 if (sel->rate->rate < rate->rate) 187 if (sel->rate->bitrate < rate->bitrate)
184 break; 188 break;
185 189
186 if (rate_supported(sta, mode, i) && 190 if (rate_supported(sta, sband->band, i) &&
187 !(rate->flags & IEEE80211_RATE_ERP)) 191 !(rate->flags & IEEE80211_RATE_ERP_G))
188 sel->nonerp = rate; 192 sel->nonerp = rate;
189 } 193 }
190 } 194 }
191 195
192 if (sta) 196 rcu_read_unlock();
193 sta_info_put(sta);
194} 197}
195 198
196struct rate_control_ref *rate_control_get(struct rate_control_ref *ref) 199struct rate_control_ref *rate_control_get(struct rate_control_ref *ref)
diff --git a/net/mac80211/ieee80211_rate.h b/net/mac80211/rate.h
index 73f19e8aa51c..5b45f33cb766 100644
--- a/net/mac80211/ieee80211_rate.h
+++ b/net/mac80211/rate.h
@@ -14,10 +14,12 @@
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/skbuff.h> 15#include <linux/skbuff.h>
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/kref.h>
17#include <net/mac80211.h> 18#include <net/mac80211.h>
18#include "ieee80211_i.h" 19#include "ieee80211_i.h"
19#include "sta_info.h" 20#include "sta_info.h"
20 21
22/* TODO: kdoc */
21struct rate_selection { 23struct rate_selection {
22 /* Selected transmission rate */ 24 /* Selected transmission rate */
23 struct ieee80211_rate *rate; 25 struct ieee80211_rate *rate;
@@ -34,7 +36,8 @@ struct rate_control_ops {
34 struct sk_buff *skb, 36 struct sk_buff *skb,
35 struct ieee80211_tx_status *status); 37 struct ieee80211_tx_status *status);
36 void (*get_rate)(void *priv, struct net_device *dev, 38 void (*get_rate)(void *priv, struct net_device *dev,
37 struct ieee80211_hw_mode *mode, struct sk_buff *skb, 39 struct ieee80211_supported_band *band,
40 struct sk_buff *skb,
38 struct rate_selection *sel); 41 struct rate_selection *sel);
39 void (*rate_init)(void *priv, void *priv_sta, 42 void (*rate_init)(void *priv, void *priv_sta,
40 struct ieee80211_local *local, struct sta_info *sta); 43 struct ieee80211_local *local, struct sta_info *sta);
@@ -66,7 +69,8 @@ void ieee80211_rate_control_unregister(struct rate_control_ops *ops);
66struct rate_control_ref *rate_control_alloc(const char *name, 69struct rate_control_ref *rate_control_alloc(const char *name,
67 struct ieee80211_local *local); 70 struct ieee80211_local *local);
68void rate_control_get_rate(struct net_device *dev, 71void rate_control_get_rate(struct net_device *dev,
69 struct ieee80211_hw_mode *mode, struct sk_buff *skb, 72 struct ieee80211_supported_band *sband,
73 struct sk_buff *skb,
70 struct rate_selection *sel); 74 struct rate_selection *sel);
71struct rate_control_ref *rate_control_get(struct rate_control_ref *ref); 75struct rate_control_ref *rate_control_get(struct rate_control_ref *ref);
72void rate_control_put(struct rate_control_ref *ref); 76void rate_control_put(struct rate_control_ref *ref);
@@ -127,23 +131,23 @@ static inline void rate_control_remove_sta_debugfs(struct sta_info *sta)
127#endif 131#endif
128} 132}
129 133
130static inline int 134static inline int rate_supported(struct sta_info *sta,
131rate_supported(struct sta_info *sta, struct ieee80211_hw_mode *mode, int index) 135 enum ieee80211_band band,
136 int index)
132{ 137{
133 return (sta == NULL || sta->supp_rates & BIT(index)) && 138 return (sta == NULL || sta->supp_rates[band] & BIT(index));
134 (mode->rates[index].flags & IEEE80211_RATE_SUPPORTED);
135} 139}
136 140
137static inline int 141static inline int
138rate_lowest_index(struct ieee80211_local *local, struct ieee80211_hw_mode *mode, 142rate_lowest_index(struct ieee80211_local *local,
143 struct ieee80211_supported_band *sband,
139 struct sta_info *sta) 144 struct sta_info *sta)
140{ 145{
141 int i; 146 int i;
142 147
143 for (i = 0; i < mode->num_rates; i++) { 148 for (i = 0; i < sband->n_bitrates; i++)
144 if (rate_supported(sta, mode, i)) 149 if (rate_supported(sta, sband->band, i))
145 return i; 150 return i;
146 }
147 151
148 /* warn when we cannot find a rate. */ 152 /* warn when we cannot find a rate. */
149 WARN_ON(1); 153 WARN_ON(1);
@@ -152,10 +156,11 @@ rate_lowest_index(struct ieee80211_local *local, struct ieee80211_hw_mode *mode,
152} 156}
153 157
154static inline struct ieee80211_rate * 158static inline struct ieee80211_rate *
155rate_lowest(struct ieee80211_local *local, struct ieee80211_hw_mode *mode, 159rate_lowest(struct ieee80211_local *local,
160 struct ieee80211_supported_band *sband,
156 struct sta_info *sta) 161 struct sta_info *sta)
157{ 162{
158 return &mode->rates[rate_lowest_index(local, mode, sta)]; 163 return &sband->bitrates[rate_lowest_index(local, sband, sta)];
159} 164}
160 165
161 166
@@ -166,21 +171,6 @@ void rate_control_deinitialize(struct ieee80211_local *local);
166 171
167 172
168/* Rate control algorithms */ 173/* Rate control algorithms */
169#if defined(RC80211_SIMPLE_COMPILE) || \
170 (defined(CONFIG_MAC80211_RC_SIMPLE) && \
171 !defined(CONFIG_MAC80211_RC_SIMPLE_MODULE))
172extern int rc80211_simple_init(void);
173extern void rc80211_simple_exit(void);
174#else
175static inline int rc80211_simple_init(void)
176{
177 return 0;
178}
179static inline void rc80211_simple_exit(void)
180{
181}
182#endif
183
184#if defined(RC80211_PID_COMPILE) || \ 174#if defined(RC80211_PID_COMPILE) || \
185 (defined(CONFIG_MAC80211_RC_PID) && \ 175 (defined(CONFIG_MAC80211_RC_PID) && \
186 !defined(CONFIG_MAC80211_RC_PID_MODULE)) 176 !defined(CONFIG_MAC80211_RC_PID_MODULE))
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index 3b77410588e7..a849b745bdb5 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -14,8 +14,8 @@
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/debugfs.h> 15#include <linux/debugfs.h>
16#include <net/mac80211.h> 16#include <net/mac80211.h>
17#include "ieee80211_rate.h" 17#include "rate.h"
18 18#include "mesh.h"
19#include "rc80211_pid.h" 19#include "rc80211_pid.h"
20 20
21 21
@@ -63,6 +63,7 @@
63 * RC_PID_ARITH_SHIFT. 63 * RC_PID_ARITH_SHIFT.
64 */ 64 */
65 65
66
66/* Adjust the rate while ensuring that we won't switch to a lower rate if it 67/* Adjust the rate while ensuring that we won't switch to a lower rate if it
67 * exhibited a worse failed frames behaviour and we'll choose the highest rate 68 * exhibited a worse failed frames behaviour and we'll choose the highest rate
68 * whose failed frames behaviour is not worse than the one of the original rate 69 * whose failed frames behaviour is not worse than the one of the original rate
@@ -72,14 +73,14 @@ static void rate_control_pid_adjust_rate(struct ieee80211_local *local,
72 struct rc_pid_rateinfo *rinfo) 73 struct rc_pid_rateinfo *rinfo)
73{ 74{
74 struct ieee80211_sub_if_data *sdata; 75 struct ieee80211_sub_if_data *sdata;
75 struct ieee80211_hw_mode *mode; 76 struct ieee80211_supported_band *sband;
76 int cur_sorted, new_sorted, probe, tmp, n_bitrates; 77 int cur_sorted, new_sorted, probe, tmp, n_bitrates, band;
77 int cur = sta->txrate; 78 int cur = sta->txrate_idx;
78
79 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
80 79
81 mode = local->oper_hw_mode; 80 sdata = sta->sdata;
82 n_bitrates = mode->num_rates; 81 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
82 band = sband->band;
83 n_bitrates = sband->n_bitrates;
83 84
84 /* Map passed arguments to sorted values. */ 85 /* Map passed arguments to sorted values. */
85 cur_sorted = rinfo[cur].rev_index; 86 cur_sorted = rinfo[cur].rev_index;
@@ -97,20 +98,20 @@ static void rate_control_pid_adjust_rate(struct ieee80211_local *local,
97 /* Ensure that the rate decrease isn't disadvantageous. */ 98 /* Ensure that the rate decrease isn't disadvantageous. */
98 for (probe = cur_sorted; probe >= new_sorted; probe--) 99 for (probe = cur_sorted; probe >= new_sorted; probe--)
99 if (rinfo[probe].diff <= rinfo[cur_sorted].diff && 100 if (rinfo[probe].diff <= rinfo[cur_sorted].diff &&
100 rate_supported(sta, mode, rinfo[probe].index)) 101 rate_supported(sta, band, rinfo[probe].index))
101 tmp = probe; 102 tmp = probe;
102 } else { 103 } else {
103 /* Look for rate increase with zero (or below) cost. */ 104 /* Look for rate increase with zero (or below) cost. */
104 for (probe = new_sorted + 1; probe < n_bitrates; probe++) 105 for (probe = new_sorted + 1; probe < n_bitrates; probe++)
105 if (rinfo[probe].diff <= rinfo[new_sorted].diff && 106 if (rinfo[probe].diff <= rinfo[new_sorted].diff &&
106 rate_supported(sta, mode, rinfo[probe].index)) 107 rate_supported(sta, band, rinfo[probe].index))
107 tmp = probe; 108 tmp = probe;
108 } 109 }
109 110
110 /* Fit the rate found to the nearest supported rate. */ 111 /* Fit the rate found to the nearest supported rate. */
111 do { 112 do {
112 if (rate_supported(sta, mode, rinfo[tmp].index)) { 113 if (rate_supported(sta, band, rinfo[tmp].index)) {
113 sta->txrate = rinfo[tmp].index; 114 sta->txrate_idx = rinfo[tmp].index;
114 break; 115 break;
115 } 116 }
116 if (adj < 0) 117 if (adj < 0)
@@ -122,7 +123,7 @@ static void rate_control_pid_adjust_rate(struct ieee80211_local *local,
122#ifdef CONFIG_MAC80211_DEBUGFS 123#ifdef CONFIG_MAC80211_DEBUGFS
123 rate_control_pid_event_rate_change( 124 rate_control_pid_event_rate_change(
124 &((struct rc_pid_sta_info *)sta->rate_ctrl_priv)->events, 125 &((struct rc_pid_sta_info *)sta->rate_ctrl_priv)->events,
125 cur, mode->rates[cur].rate); 126 sta->txrate_idx, sband->bitrates[sta->txrate_idx].bitrate);
126#endif 127#endif
127} 128}
128 129
@@ -147,9 +148,12 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
147 struct ieee80211_local *local, 148 struct ieee80211_local *local,
148 struct sta_info *sta) 149 struct sta_info *sta)
149{ 150{
151#ifdef CONFIG_MAC80211_MESH
152 struct ieee80211_sub_if_data *sdata = sta->sdata;
153#endif
150 struct rc_pid_sta_info *spinfo = sta->rate_ctrl_priv; 154 struct rc_pid_sta_info *spinfo = sta->rate_ctrl_priv;
151 struct rc_pid_rateinfo *rinfo = pinfo->rinfo; 155 struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
152 struct ieee80211_hw_mode *mode; 156 struct ieee80211_supported_band *sband;
153 u32 pf; 157 u32 pf;
154 s32 err_avg; 158 s32 err_avg;
155 u32 err_prop; 159 u32 err_prop;
@@ -158,7 +162,7 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
158 int adj, i, j, tmp; 162 int adj, i, j, tmp;
159 unsigned long period; 163 unsigned long period;
160 164
161 mode = local->oper_hw_mode; 165 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
162 spinfo = sta->rate_ctrl_priv; 166 spinfo = sta->rate_ctrl_priv;
163 167
164 /* In case nothing happened during the previous control interval, turn 168 /* In case nothing happened during the previous control interval, turn
@@ -177,25 +181,32 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
177 pf = spinfo->last_pf; 181 pf = spinfo->last_pf;
178 else { 182 else {
179 pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit; 183 pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit;
184#ifdef CONFIG_MAC80211_MESH
185 if (pf == 100 &&
186 sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT)
187 mesh_plink_broken(sta);
188#endif
180 pf <<= RC_PID_ARITH_SHIFT; 189 pf <<= RC_PID_ARITH_SHIFT;
190 sta->fail_avg = ((pf + (spinfo->last_pf << 3)) / 9)
191 >> RC_PID_ARITH_SHIFT;
181 } 192 }
182 193
183 spinfo->tx_num_xmit = 0; 194 spinfo->tx_num_xmit = 0;
184 spinfo->tx_num_failed = 0; 195 spinfo->tx_num_failed = 0;
185 196
186 /* If we just switched rate, update the rate behaviour info. */ 197 /* If we just switched rate, update the rate behaviour info. */
187 if (pinfo->oldrate != sta->txrate) { 198 if (pinfo->oldrate != sta->txrate_idx) {
188 199
189 i = rinfo[pinfo->oldrate].rev_index; 200 i = rinfo[pinfo->oldrate].rev_index;
190 j = rinfo[sta->txrate].rev_index; 201 j = rinfo[sta->txrate_idx].rev_index;
191 202
192 tmp = (pf - spinfo->last_pf); 203 tmp = (pf - spinfo->last_pf);
193 tmp = RC_PID_DO_ARITH_RIGHT_SHIFT(tmp, RC_PID_ARITH_SHIFT); 204 tmp = RC_PID_DO_ARITH_RIGHT_SHIFT(tmp, RC_PID_ARITH_SHIFT);
194 205
195 rinfo[j].diff = rinfo[i].diff + tmp; 206 rinfo[j].diff = rinfo[i].diff + tmp;
196 pinfo->oldrate = sta->txrate; 207 pinfo->oldrate = sta->txrate_idx;
197 } 208 }
198 rate_control_pid_normalize(pinfo, mode->num_rates); 209 rate_control_pid_normalize(pinfo, sband->n_bitrates);
199 210
200 /* Compute the proportional, integral and derivative errors. */ 211 /* Compute the proportional, integral and derivative errors. */
201 err_prop = (pinfo->target << RC_PID_ARITH_SHIFT) - pf; 212 err_prop = (pinfo->target << RC_PID_ARITH_SHIFT) - pf;
@@ -236,23 +247,27 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
236 struct sta_info *sta; 247 struct sta_info *sta;
237 struct rc_pid_sta_info *spinfo; 248 struct rc_pid_sta_info *spinfo;
238 unsigned long period; 249 unsigned long period;
250 struct ieee80211_supported_band *sband;
251
252 rcu_read_lock();
239 253
240 sta = sta_info_get(local, hdr->addr1); 254 sta = sta_info_get(local, hdr->addr1);
255 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
241 256
242 if (!sta) 257 if (!sta)
243 return; 258 goto unlock;
244 259
245 /* Don't update the state if we're not controlling the rate. */ 260 /* Don't update the state if we're not controlling the rate. */
246 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); 261 sdata = sta->sdata;
247 if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) { 262 if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) {
248 sta->txrate = sdata->bss->max_ratectrl_rateidx; 263 sta->txrate_idx = sdata->bss->max_ratectrl_rateidx;
249 return; 264 goto unlock;
250 } 265 }
251 266
252 /* Ignore all frames that were sent with a different rate than the rate 267 /* Ignore all frames that were sent with a different rate than the rate
253 * we currently advise mac80211 to use. */ 268 * we currently advise mac80211 to use. */
254 if (status->control.rate != &local->oper_hw_mode->rates[sta->txrate]) 269 if (status->control.tx_rate != &sband->bitrates[sta->txrate_idx])
255 goto ignore; 270 goto unlock;
256 271
257 spinfo = sta->rate_ctrl_priv; 272 spinfo = sta->rate_ctrl_priv;
258 spinfo->tx_num_xmit++; 273 spinfo->tx_num_xmit++;
@@ -277,9 +292,6 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
277 sta->tx_num_consecutive_failures++; 292 sta->tx_num_consecutive_failures++;
278 sta->tx_num_mpdu_fail++; 293 sta->tx_num_mpdu_fail++;
279 } else { 294 } else {
280 sta->last_ack_rssi[0] = sta->last_ack_rssi[1];
281 sta->last_ack_rssi[1] = sta->last_ack_rssi[2];
282 sta->last_ack_rssi[2] = status->ack_signal;
283 sta->tx_num_consecutive_failures = 0; 295 sta->tx_num_consecutive_failures = 0;
284 sta->tx_num_mpdu_ok++; 296 sta->tx_num_mpdu_ok++;
285 } 297 }
@@ -293,12 +305,12 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
293 if (time_after(jiffies, spinfo->last_sample + period)) 305 if (time_after(jiffies, spinfo->last_sample + period))
294 rate_control_pid_sample(pinfo, local, sta); 306 rate_control_pid_sample(pinfo, local, sta);
295 307
296ignore: 308 unlock:
297 sta_info_put(sta); 309 rcu_read_unlock();
298} 310}
299 311
300static void rate_control_pid_get_rate(void *priv, struct net_device *dev, 312static void rate_control_pid_get_rate(void *priv, struct net_device *dev,
301 struct ieee80211_hw_mode *mode, 313 struct ieee80211_supported_band *sband,
302 struct sk_buff *skb, 314 struct sk_buff *skb,
303 struct rate_selection *sel) 315 struct rate_selection *sel)
304{ 316{
@@ -309,6 +321,8 @@ static void rate_control_pid_get_rate(void *priv, struct net_device *dev,
309 int rateidx; 321 int rateidx;
310 u16 fc; 322 u16 fc;
311 323
324 rcu_read_lock();
325
312 sta = sta_info_get(local, hdr->addr1); 326 sta = sta_info_get(local, hdr->addr1);
313 327
314 /* Send management frames and broadcast/multicast data using lowest 328 /* Send management frames and broadcast/multicast data using lowest
@@ -316,32 +330,31 @@ static void rate_control_pid_get_rate(void *priv, struct net_device *dev,
316 fc = le16_to_cpu(hdr->frame_control); 330 fc = le16_to_cpu(hdr->frame_control);
317 if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA || 331 if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA ||
318 is_multicast_ether_addr(hdr->addr1) || !sta) { 332 is_multicast_ether_addr(hdr->addr1) || !sta) {
319 sel->rate = rate_lowest(local, mode, sta); 333 sel->rate = rate_lowest(local, sband, sta);
320 if (sta) 334 rcu_read_unlock();
321 sta_info_put(sta);
322 return; 335 return;
323 } 336 }
324 337
325 /* If a forced rate is in effect, select it. */ 338 /* If a forced rate is in effect, select it. */
326 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 339 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
327 if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) 340 if (sdata->bss && sdata->bss->force_unicast_rateidx > -1)
328 sta->txrate = sdata->bss->force_unicast_rateidx; 341 sta->txrate_idx = sdata->bss->force_unicast_rateidx;
329 342
330 rateidx = sta->txrate; 343 rateidx = sta->txrate_idx;
331 344
332 if (rateidx >= mode->num_rates) 345 if (rateidx >= sband->n_bitrates)
333 rateidx = mode->num_rates - 1; 346 rateidx = sband->n_bitrates - 1;
334 347
335 sta->last_txrate = rateidx; 348 sta->last_txrate_idx = rateidx;
336 349
337 sta_info_put(sta); 350 rcu_read_unlock();
338 351
339 sel->rate = &mode->rates[rateidx]; 352 sel->rate = &sband->bitrates[rateidx];
340 353
341#ifdef CONFIG_MAC80211_DEBUGFS 354#ifdef CONFIG_MAC80211_DEBUGFS
342 rate_control_pid_event_tx_rate( 355 rate_control_pid_event_tx_rate(
343 &((struct rc_pid_sta_info *) sta->rate_ctrl_priv)->events, 356 &((struct rc_pid_sta_info *) sta->rate_ctrl_priv)->events,
344 rateidx, mode->rates[rateidx].rate); 357 rateidx, sband->bitrates[rateidx].bitrate);
345#endif 358#endif
346} 359}
347 360
@@ -353,28 +366,33 @@ static void rate_control_pid_rate_init(void *priv, void *priv_sta,
353 * as we need to have IEEE 802.1X auth succeed immediately after assoc.. 366 * as we need to have IEEE 802.1X auth succeed immediately after assoc..
354 * Until that method is implemented, we will use the lowest supported 367 * Until that method is implemented, we will use the lowest supported
355 * rate as a workaround. */ 368 * rate as a workaround. */
356 sta->txrate = rate_lowest_index(local, local->oper_hw_mode, sta); 369 struct ieee80211_supported_band *sband;
370
371 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
372 sta->txrate_idx = rate_lowest_index(local, sband, sta);
373 sta->fail_avg = 0;
357} 374}
358 375
359static void *rate_control_pid_alloc(struct ieee80211_local *local) 376static void *rate_control_pid_alloc(struct ieee80211_local *local)
360{ 377{
361 struct rc_pid_info *pinfo; 378 struct rc_pid_info *pinfo;
362 struct rc_pid_rateinfo *rinfo; 379 struct rc_pid_rateinfo *rinfo;
363 struct ieee80211_hw_mode *mode; 380 struct ieee80211_supported_band *sband;
364 int i, j, tmp; 381 int i, j, tmp;
365 bool s; 382 bool s;
366#ifdef CONFIG_MAC80211_DEBUGFS 383#ifdef CONFIG_MAC80211_DEBUGFS
367 struct rc_pid_debugfs_entries *de; 384 struct rc_pid_debugfs_entries *de;
368#endif 385#endif
369 386
387 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
388
370 pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC); 389 pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC);
371 if (!pinfo) 390 if (!pinfo)
372 return NULL; 391 return NULL;
373 392
374 /* We can safely assume that oper_hw_mode won't change unless we get 393 /* We can safely assume that sband won't change unless we get
375 * reinitialized. */ 394 * reinitialized. */
376 mode = local->oper_hw_mode; 395 rinfo = kmalloc(sizeof(*rinfo) * sband->n_bitrates, GFP_ATOMIC);
377 rinfo = kmalloc(sizeof(*rinfo) * mode->num_rates, GFP_ATOMIC);
378 if (!rinfo) { 396 if (!rinfo) {
379 kfree(pinfo); 397 kfree(pinfo);
380 return NULL; 398 return NULL;
@@ -383,7 +401,7 @@ static void *rate_control_pid_alloc(struct ieee80211_local *local)
383 /* Sort the rates. This is optimized for the most common case (i.e. 401 /* Sort the rates. This is optimized for the most common case (i.e.
384 * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed 402 * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
385 * mapping too. */ 403 * mapping too. */
386 for (i = 0; i < mode->num_rates; i++) { 404 for (i = 0; i < sband->n_bitrates; i++) {
387 rinfo[i].index = i; 405 rinfo[i].index = i;
388 rinfo[i].rev_index = i; 406 rinfo[i].rev_index = i;
389 if (pinfo->fast_start) 407 if (pinfo->fast_start)
@@ -391,11 +409,11 @@ static void *rate_control_pid_alloc(struct ieee80211_local *local)
391 else 409 else
392 rinfo[i].diff = i * pinfo->norm_offset; 410 rinfo[i].diff = i * pinfo->norm_offset;
393 } 411 }
394 for (i = 1; i < mode->num_rates; i++) { 412 for (i = 1; i < sband->n_bitrates; i++) {
395 s = 0; 413 s = 0;
396 for (j = 0; j < mode->num_rates - i; j++) 414 for (j = 0; j < sband->n_bitrates - i; j++)
397 if (unlikely(mode->rates[rinfo[j].index].rate > 415 if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
398 mode->rates[rinfo[j + 1].index].rate)) { 416 sband->bitrates[rinfo[j + 1].index].bitrate)) {
399 tmp = rinfo[j].index; 417 tmp = rinfo[j].index;
400 rinfo[j].index = rinfo[j + 1].index; 418 rinfo[j].index = rinfo[j + 1].index;
401 rinfo[j + 1].index = tmp; 419 rinfo[j + 1].index = tmp;
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
index 88b8dc9999bb..ae75d4178739 100644
--- a/net/mac80211/rc80211_pid_debugfs.c
+++ b/net/mac80211/rc80211_pid_debugfs.c
@@ -13,7 +13,7 @@
13#include <linux/skbuff.h> 13#include <linux/skbuff.h>
14 14
15#include <net/mac80211.h> 15#include <net/mac80211.h>
16#include "ieee80211_rate.h" 16#include "rate.h"
17 17
18#include "rc80211_pid.h" 18#include "rc80211_pid.h"
19 19
diff --git a/net/mac80211/rc80211_simple.c b/net/mac80211/rc80211_simple.c
deleted file mode 100644
index 9a78b116acff..000000000000
--- a/net/mac80211/rc80211_simple.c
+++ /dev/null
@@ -1,400 +0,0 @@
1/*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005, Devicescape Software, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/init.h>
11#include <linux/netdevice.h>
12#include <linux/types.h>
13#include <linux/slab.h>
14#include <linux/skbuff.h>
15#include <linux/compiler.h>
16#include <linux/module.h>
17
18#include <net/mac80211.h>
19#include "ieee80211_i.h"
20#include "ieee80211_rate.h"
21#include "debugfs.h"
22
23
24/* This is a minimal implementation of TX rate controlling that can be used
25 * as the default when no improved mechanisms are available. */
26
27#define RATE_CONTROL_NUM_DOWN 20
28#define RATE_CONTROL_NUM_UP 15
29
30#define RATE_CONTROL_EMERG_DEC 2
31#define RATE_CONTROL_INTERVAL (HZ / 20)
32#define RATE_CONTROL_MIN_TX 10
33
34static void rate_control_rate_inc(struct ieee80211_local *local,
35 struct sta_info *sta)
36{
37 struct ieee80211_sub_if_data *sdata;
38 struct ieee80211_hw_mode *mode;
39 int i = sta->txrate;
40 int maxrate;
41
42 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
43 if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) {
44 /* forced unicast rate - do not change STA rate */
45 return;
46 }
47
48 mode = local->oper_hw_mode;
49 maxrate = sdata->bss ? sdata->bss->max_ratectrl_rateidx : -1;
50
51 if (i > mode->num_rates)
52 i = mode->num_rates - 2;
53
54 while (i + 1 < mode->num_rates) {
55 i++;
56 if (sta->supp_rates & BIT(i) &&
57 mode->rates[i].flags & IEEE80211_RATE_SUPPORTED &&
58 (maxrate < 0 || i <= maxrate)) {
59 sta->txrate = i;
60 break;
61 }
62 }
63}
64
65
66static void rate_control_rate_dec(struct ieee80211_local *local,
67 struct sta_info *sta)
68{
69 struct ieee80211_sub_if_data *sdata;
70 struct ieee80211_hw_mode *mode;
71 int i = sta->txrate;
72
73 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
74 if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) {
75 /* forced unicast rate - do not change STA rate */
76 return;
77 }
78
79 mode = local->oper_hw_mode;
80 if (i > mode->num_rates)
81 i = mode->num_rates;
82
83 while (i > 0) {
84 i--;
85 if (sta->supp_rates & BIT(i) &&
86 mode->rates[i].flags & IEEE80211_RATE_SUPPORTED) {
87 sta->txrate = i;
88 break;
89 }
90 }
91}
92
93struct global_rate_control {
94 int dummy;
95};
96
97struct sta_rate_control {
98 unsigned long last_rate_change;
99 u32 tx_num_failures;
100 u32 tx_num_xmit;
101
102 unsigned long avg_rate_update;
103 u32 tx_avg_rate_sum;
104 u32 tx_avg_rate_num;
105
106#ifdef CONFIG_MAC80211_DEBUGFS
107 struct dentry *tx_avg_rate_sum_dentry;
108 struct dentry *tx_avg_rate_num_dentry;
109#endif
110};
111
112
113static void rate_control_simple_tx_status(void *priv, struct net_device *dev,
114 struct sk_buff *skb,
115 struct ieee80211_tx_status *status)
116{
117 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
118 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
119 struct sta_info *sta;
120 struct sta_rate_control *srctrl;
121
122 sta = sta_info_get(local, hdr->addr1);
123
124 if (!sta)
125 return;
126
127 srctrl = sta->rate_ctrl_priv;
128 srctrl->tx_num_xmit++;
129 if (status->excessive_retries) {
130 srctrl->tx_num_failures++;
131 sta->tx_retry_failed++;
132 sta->tx_num_consecutive_failures++;
133 sta->tx_num_mpdu_fail++;
134 } else {
135 sta->last_ack_rssi[0] = sta->last_ack_rssi[1];
136 sta->last_ack_rssi[1] = sta->last_ack_rssi[2];
137 sta->last_ack_rssi[2] = status->ack_signal;
138 sta->tx_num_consecutive_failures = 0;
139 sta->tx_num_mpdu_ok++;
140 }
141 sta->tx_retry_count += status->retry_count;
142 sta->tx_num_mpdu_fail += status->retry_count;
143
144 if (time_after(jiffies,
145 srctrl->last_rate_change + RATE_CONTROL_INTERVAL) &&
146 srctrl->tx_num_xmit > RATE_CONTROL_MIN_TX) {
147 u32 per_failed;
148 srctrl->last_rate_change = jiffies;
149
150 per_failed = (100 * sta->tx_num_mpdu_fail) /
151 (sta->tx_num_mpdu_fail + sta->tx_num_mpdu_ok);
152 /* TODO: calculate average per_failed to make adjusting
153 * parameters easier */
154#if 0
155 if (net_ratelimit()) {
156 printk(KERN_DEBUG "MPDU fail=%d ok=%d per_failed=%d\n",
157 sta->tx_num_mpdu_fail, sta->tx_num_mpdu_ok,
158 per_failed);
159 }
160#endif
161
162 /*
163 * XXX: Make these configurable once we have an
164 * interface to the rate control algorithms
165 */
166 if (per_failed > RATE_CONTROL_NUM_DOWN) {
167 rate_control_rate_dec(local, sta);
168 } else if (per_failed < RATE_CONTROL_NUM_UP) {
169 rate_control_rate_inc(local, sta);
170 }
171 srctrl->tx_avg_rate_sum += status->control.rate->rate;
172 srctrl->tx_avg_rate_num++;
173 srctrl->tx_num_failures = 0;
174 srctrl->tx_num_xmit = 0;
175 } else if (sta->tx_num_consecutive_failures >=
176 RATE_CONTROL_EMERG_DEC) {
177 rate_control_rate_dec(local, sta);
178 }
179
180 if (srctrl->avg_rate_update + 60 * HZ < jiffies) {
181 srctrl->avg_rate_update = jiffies;
182 if (srctrl->tx_avg_rate_num > 0) {
183#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
184 DECLARE_MAC_BUF(mac);
185 printk(KERN_DEBUG "%s: STA %s Average rate: "
186 "%d (%d/%d)\n",
187 dev->name, print_mac(mac, sta->addr),
188 srctrl->tx_avg_rate_sum /
189 srctrl->tx_avg_rate_num,
190 srctrl->tx_avg_rate_sum,
191 srctrl->tx_avg_rate_num);
192#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
193 srctrl->tx_avg_rate_sum = 0;
194 srctrl->tx_avg_rate_num = 0;
195 }
196 }
197
198 sta_info_put(sta);
199}
200
201
202static void
203rate_control_simple_get_rate(void *priv, struct net_device *dev,
204 struct ieee80211_hw_mode *mode,
205 struct sk_buff *skb,
206 struct rate_selection *sel)
207{
208 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
209 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
210 struct ieee80211_sub_if_data *sdata;
211 struct sta_info *sta;
212 int rateidx;
213 u16 fc;
214
215 sta = sta_info_get(local, hdr->addr1);
216
217 /* Send management frames and broadcast/multicast data using lowest
218 * rate. */
219 fc = le16_to_cpu(hdr->frame_control);
220 if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA ||
221 is_multicast_ether_addr(hdr->addr1) || !sta) {
222 sel->rate = rate_lowest(local, mode, sta);
223 if (sta)
224 sta_info_put(sta);
225 return;
226 }
227
228 /* If a forced rate is in effect, select it. */
229 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
230 if (sdata->bss && sdata->bss->force_unicast_rateidx > -1)
231 sta->txrate = sdata->bss->force_unicast_rateidx;
232
233 rateidx = sta->txrate;
234
235 if (rateidx >= mode->num_rates)
236 rateidx = mode->num_rates - 1;
237
238 sta->last_txrate = rateidx;
239
240 sta_info_put(sta);
241
242 sel->rate = &mode->rates[rateidx];
243}
244
245
246static void rate_control_simple_rate_init(void *priv, void *priv_sta,
247 struct ieee80211_local *local,
248 struct sta_info *sta)
249{
250 struct ieee80211_hw_mode *mode;
251 int i;
252 sta->txrate = 0;
253 mode = local->oper_hw_mode;
254 /* TODO: This routine should consider using RSSI from previous packets
255 * as we need to have IEEE 802.1X auth succeed immediately after assoc..
256 * Until that method is implemented, we will use the lowest supported rate
257 * as a workaround, */
258 for (i = 0; i < mode->num_rates; i++) {
259 if ((sta->supp_rates & BIT(i)) &&
260 (mode->rates[i].flags & IEEE80211_RATE_SUPPORTED)) {
261 sta->txrate = i;
262 break;
263 }
264 }
265}
266
267
268static void * rate_control_simple_alloc(struct ieee80211_local *local)
269{
270 struct global_rate_control *rctrl;
271
272 rctrl = kzalloc(sizeof(*rctrl), GFP_ATOMIC);
273
274 return rctrl;
275}
276
277
278static void rate_control_simple_free(void *priv)
279{
280 struct global_rate_control *rctrl = priv;
281 kfree(rctrl);
282}
283
284
285static void rate_control_simple_clear(void *priv)
286{
287}
288
289
290static void * rate_control_simple_alloc_sta(void *priv, gfp_t gfp)
291{
292 struct sta_rate_control *rctrl;
293
294 rctrl = kzalloc(sizeof(*rctrl), gfp);
295
296 return rctrl;
297}
298
299
300static void rate_control_simple_free_sta(void *priv, void *priv_sta)
301{
302 struct sta_rate_control *rctrl = priv_sta;
303 kfree(rctrl);
304}
305
306#ifdef CONFIG_MAC80211_DEBUGFS
307
308static int open_file_generic(struct inode *inode, struct file *file)
309{
310 file->private_data = inode->i_private;
311 return 0;
312}
313
314static ssize_t sta_tx_avg_rate_sum_read(struct file *file,
315 char __user *userbuf,
316 size_t count, loff_t *ppos)
317{
318 struct sta_rate_control *srctrl = file->private_data;
319 char buf[20];
320
321 sprintf(buf, "%d\n", srctrl->tx_avg_rate_sum);
322 return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
323}
324
325static const struct file_operations sta_tx_avg_rate_sum_ops = {
326 .read = sta_tx_avg_rate_sum_read,
327 .open = open_file_generic,
328};
329
330static ssize_t sta_tx_avg_rate_num_read(struct file *file,
331 char __user *userbuf,
332 size_t count, loff_t *ppos)
333{
334 struct sta_rate_control *srctrl = file->private_data;
335 char buf[20];
336
337 sprintf(buf, "%d\n", srctrl->tx_avg_rate_num);
338 return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
339}
340
341static const struct file_operations sta_tx_avg_rate_num_ops = {
342 .read = sta_tx_avg_rate_num_read,
343 .open = open_file_generic,
344};
345
346static void rate_control_simple_add_sta_debugfs(void *priv, void *priv_sta,
347 struct dentry *dir)
348{
349 struct sta_rate_control *srctrl = priv_sta;
350
351 srctrl->tx_avg_rate_num_dentry =
352 debugfs_create_file("rc_simple_sta_tx_avg_rate_num", 0400,
353 dir, srctrl, &sta_tx_avg_rate_num_ops);
354 srctrl->tx_avg_rate_sum_dentry =
355 debugfs_create_file("rc_simple_sta_tx_avg_rate_sum", 0400,
356 dir, srctrl, &sta_tx_avg_rate_sum_ops);
357}
358
359static void rate_control_simple_remove_sta_debugfs(void *priv, void *priv_sta)
360{
361 struct sta_rate_control *srctrl = priv_sta;
362
363 debugfs_remove(srctrl->tx_avg_rate_sum_dentry);
364 debugfs_remove(srctrl->tx_avg_rate_num_dentry);
365}
366#endif
367
368static struct rate_control_ops mac80211_rcsimple = {
369 .name = "simple",
370 .tx_status = rate_control_simple_tx_status,
371 .get_rate = rate_control_simple_get_rate,
372 .rate_init = rate_control_simple_rate_init,
373 .clear = rate_control_simple_clear,
374 .alloc = rate_control_simple_alloc,
375 .free = rate_control_simple_free,
376 .alloc_sta = rate_control_simple_alloc_sta,
377 .free_sta = rate_control_simple_free_sta,
378#ifdef CONFIG_MAC80211_DEBUGFS
379 .add_sta_debugfs = rate_control_simple_add_sta_debugfs,
380 .remove_sta_debugfs = rate_control_simple_remove_sta_debugfs,
381#endif
382};
383
384MODULE_LICENSE("GPL");
385MODULE_DESCRIPTION("Simple rate control algorithm");
386
387int __init rc80211_simple_init(void)
388{
389 return ieee80211_rate_control_register(&mac80211_rcsimple);
390}
391
392void rc80211_simple_exit(void)
393{
394 ieee80211_rate_control_unregister(&mac80211_rcsimple);
395}
396
397#ifdef CONFIG_MAC80211_RC_SIMPLE_MODULE
398module_init(rc80211_simple_init);
399module_exit(rc80211_simple_exit);
400#endif
diff --git a/net/mac80211/regdomain.c b/net/mac80211/regdomain.c
deleted file mode 100644
index f42678fa62d1..000000000000
--- a/net/mac80211/regdomain.c
+++ /dev/null
@@ -1,152 +0,0 @@
1/*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10/*
11 * This regulatory domain control implementation is known to be incomplete
12 * and confusing. mac80211 regulatory domain control will be significantly
13 * reworked in the not-too-distant future.
14 *
15 * For now, drivers wishing to control which channels are and aren't available
16 * are advised as follows:
17 * - set the IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED flag
18 * - continue to include *ALL* possible channels in the modes registered
19 * through ieee80211_register_hwmode()
20 * - for each allowable ieee80211_channel structure registered in the above
21 * call, set the flag member to some meaningful value such as
22 * IEEE80211_CHAN_W_SCAN | IEEE80211_CHAN_W_ACTIVE_SCAN |
23 * IEEE80211_CHAN_W_IBSS.
24 * - leave flag as 0 for non-allowable channels
25 *
26 * The usual implementation is for a driver to read a device EEPROM to
27 * determine which regulatory domain it should be operating under, then
28 * looking up the allowable channels in a driver-local table, then performing
29 * the above.
30 */
31
32#include <linux/module.h>
33#include <linux/netdevice.h>
34#include <net/mac80211.h>
35#include "ieee80211_i.h"
36
37static int ieee80211_regdom = 0x10; /* FCC */
38module_param(ieee80211_regdom, int, 0444);
39MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain; 64=MKK");
40
41/*
42 * If firmware is upgraded by the vendor, additional channels can be used based
43 * on the new Japanese regulatory rules. This is indicated by setting
44 * ieee80211_japan_5ghz module parameter to one when loading the 80211 kernel
45 * module.
46 */
47static int ieee80211_japan_5ghz /* = 0 */;
48module_param(ieee80211_japan_5ghz, int, 0444);
49MODULE_PARM_DESC(ieee80211_japan_5ghz, "Vendor-updated firmware for 5 GHz");
50
51
52struct ieee80211_channel_range {
53 short start_freq;
54 short end_freq;
55 unsigned char power_level;
56 unsigned char antenna_max;
57};
58
59static const struct ieee80211_channel_range ieee80211_fcc_channels[] = {
60 { 2412, 2462, 27, 6 } /* IEEE 802.11b/g, channels 1..11 */,
61 { 5180, 5240, 17, 6 } /* IEEE 802.11a, channels 36..48 */,
62 { 5260, 5320, 23, 6 } /* IEEE 802.11a, channels 52..64 */,
63 { 5745, 5825, 30, 6 } /* IEEE 802.11a, channels 149..165, outdoor */,
64 { 0 }
65};
66
67static const struct ieee80211_channel_range ieee80211_mkk_channels[] = {
68 { 2412, 2472, 20, 6 } /* IEEE 802.11b/g, channels 1..13 */,
69 { 5170, 5240, 20, 6 } /* IEEE 802.11a, channels 34..48 */,
70 { 5260, 5320, 20, 6 } /* IEEE 802.11a, channels 52..64 */,
71 { 0 }
72};
73
74
75static const struct ieee80211_channel_range *channel_range =
76 ieee80211_fcc_channels;
77
78
79static void ieee80211_unmask_channel(int mode, struct ieee80211_channel *chan)
80{
81 int i;
82
83 chan->flag = 0;
84
85 for (i = 0; channel_range[i].start_freq; i++) {
86 const struct ieee80211_channel_range *r = &channel_range[i];
87 if (r->start_freq <= chan->freq && r->end_freq >= chan->freq) {
88 if (ieee80211_regdom == 64 && !ieee80211_japan_5ghz &&
89 chan->freq >= 5260 && chan->freq <= 5320) {
90 /*
91 * Skip new channels in Japan since the
92 * firmware was not marked having been upgraded
93 * by the vendor.
94 */
95 continue;
96 }
97
98 if (ieee80211_regdom == 0x10 &&
99 (chan->freq == 5190 || chan->freq == 5210 ||
100 chan->freq == 5230)) {
101 /* Skip MKK channels when in FCC domain. */
102 continue;
103 }
104
105 chan->flag |= IEEE80211_CHAN_W_SCAN |
106 IEEE80211_CHAN_W_ACTIVE_SCAN |
107 IEEE80211_CHAN_W_IBSS;
108 chan->power_level = r->power_level;
109 chan->antenna_max = r->antenna_max;
110
111 if (ieee80211_regdom == 64 &&
112 (chan->freq == 5170 || chan->freq == 5190 ||
113 chan->freq == 5210 || chan->freq == 5230)) {
114 /*
115 * New regulatory rules in Japan have backwards
116 * compatibility with old channels in 5.15-5.25
117 * GHz band, but the station is not allowed to
118 * use active scan on these old channels.
119 */
120 chan->flag &= ~IEEE80211_CHAN_W_ACTIVE_SCAN;
121 }
122
123 if (ieee80211_regdom == 64 &&
124 (chan->freq == 5260 || chan->freq == 5280 ||
125 chan->freq == 5300 || chan->freq == 5320)) {
126 /*
127 * IBSS is not allowed on 5.25-5.35 GHz band
128 * due to radar detection requirements.
129 */
130 chan->flag &= ~IEEE80211_CHAN_W_IBSS;
131 }
132
133 break;
134 }
135 }
136}
137
138
139void ieee80211_set_default_regdomain(struct ieee80211_hw_mode *mode)
140{
141 int c;
142 for (c = 0; c < mode->num_channels; c++)
143 ieee80211_unmask_channel(mode->mode, &mode->channels[c]);
144}
145
146
147void ieee80211_regdomain_init(void)
148{
149 if (ieee80211_regdom == 0x40)
150 channel_range = ieee80211_mkk_channels;
151}
152
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index a8a40aba846b..52e4554fdde7 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -9,6 +9,7 @@
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/jiffies.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
13#include <linux/skbuff.h> 14#include <linux/skbuff.h>
14#include <linux/netdevice.h> 15#include <linux/netdevice.h>
@@ -18,7 +19,8 @@
18#include <net/ieee80211_radiotap.h> 19#include <net/ieee80211_radiotap.h>
19 20
20#include "ieee80211_i.h" 21#include "ieee80211_i.h"
21#include "ieee80211_led.h" 22#include "led.h"
23#include "mesh.h"
22#include "wep.h" 24#include "wep.h"
23#include "wpa.h" 25#include "wpa.h"
24#include "tkip.h" 26#include "tkip.h"
@@ -82,10 +84,10 @@ static inline int should_drop_frame(struct ieee80211_rx_status *status,
82 */ 84 */
83static struct sk_buff * 85static struct sk_buff *
84ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, 86ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
85 struct ieee80211_rx_status *status) 87 struct ieee80211_rx_status *status,
88 struct ieee80211_rate *rate)
86{ 89{
87 struct ieee80211_sub_if_data *sdata; 90 struct ieee80211_sub_if_data *sdata;
88 struct ieee80211_rate *rate;
89 int needed_headroom = 0; 91 int needed_headroom = 0;
90 struct ieee80211_radiotap_header *rthdr; 92 struct ieee80211_radiotap_header *rthdr;
91 __le64 *rttsft = NULL; 93 __le64 *rttsft = NULL;
@@ -194,14 +196,11 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
194 rtfixed->rx_flags |= 196 rtfixed->rx_flags |=
195 cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS); 197 cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS);
196 198
197 rate = ieee80211_get_rate(local, status->phymode, 199 rtfixed->rate = rate->bitrate / 5;
198 status->rate);
199 if (rate)
200 rtfixed->rate = rate->rate / 5;
201 200
202 rtfixed->chan_freq = cpu_to_le16(status->freq); 201 rtfixed->chan_freq = cpu_to_le16(status->freq);
203 202
204 if (status->phymode == MODE_IEEE80211A) 203 if (status->band == IEEE80211_BAND_5GHZ)
205 rtfixed->chan_flags = 204 rtfixed->chan_flags =
206 cpu_to_le16(IEEE80211_CHAN_OFDM | 205 cpu_to_le16(IEEE80211_CHAN_OFDM |
207 IEEE80211_CHAN_5GHZ); 206 IEEE80211_CHAN_5GHZ);
@@ -226,6 +225,9 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
226 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR) 225 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR)
227 continue; 226 continue;
228 227
228 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
229 continue;
230
229 if (prev_dev) { 231 if (prev_dev) {
230 skb2 = skb_clone(skb, GFP_ATOMIC); 232 skb2 = skb_clone(skb, GFP_ATOMIC);
231 if (skb2) { 233 if (skb2) {
@@ -249,15 +251,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
249} 251}
250 252
251 253
252/* pre-rx handlers 254static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
253 *
254 * these don't have dev/sdata fields in the rx data
255 * The sta value should also not be used because it may
256 * be NULL even though a STA (in IBSS mode) will be added.
257 */
258
259static ieee80211_txrx_result
260ieee80211_rx_h_parse_qos(struct ieee80211_txrx_data *rx)
261{ 255{
262 u8 *data = rx->skb->data; 256 u8 *data = rx->skb->data;
263 int tid; 257 int tid;
@@ -268,9 +262,9 @@ ieee80211_rx_h_parse_qos(struct ieee80211_txrx_data *rx)
268 /* frame has qos control */ 262 /* frame has qos control */
269 tid = qc[0] & QOS_CONTROL_TID_MASK; 263 tid = qc[0] & QOS_CONTROL_TID_MASK;
270 if (qc[0] & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT) 264 if (qc[0] & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
271 rx->flags |= IEEE80211_TXRXD_RX_AMSDU; 265 rx->flags |= IEEE80211_RX_AMSDU;
272 else 266 else
273 rx->flags &= ~IEEE80211_TXRXD_RX_AMSDU; 267 rx->flags &= ~IEEE80211_RX_AMSDU;
274 } else { 268 } else {
275 if (unlikely((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)) { 269 if (unlikely((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)) {
276 /* Separate TID for management frames */ 270 /* Separate TID for management frames */
@@ -286,68 +280,19 @@ ieee80211_rx_h_parse_qos(struct ieee80211_txrx_data *rx)
286 if (rx->sta) 280 if (rx->sta)
287 I802_DEBUG_INC(rx->sta->wme_rx_queue[tid]); 281 I802_DEBUG_INC(rx->sta->wme_rx_queue[tid]);
288 282
289 rx->u.rx.queue = tid; 283 rx->queue = tid;
290 /* Set skb->priority to 1d tag if highest order bit of TID is not set. 284 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
291 * For now, set skb->priority to 0 for other cases. */ 285 * For now, set skb->priority to 0 for other cases. */
292 rx->skb->priority = (tid > 7) ? 0 : tid; 286 rx->skb->priority = (tid > 7) ? 0 : tid;
293
294 return TXRX_CONTINUE;
295} 287}
296 288
297 289static void ieee80211_verify_ip_alignment(struct ieee80211_rx_data *rx)
298static u32 ieee80211_rx_load_stats(struct ieee80211_local *local,
299 struct sk_buff *skb,
300 struct ieee80211_rx_status *status)
301{ 290{
302 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
303 u32 load = 0, hdrtime;
304 struct ieee80211_rate *rate;
305 struct ieee80211_hw_mode *mode = local->hw.conf.mode;
306 int i;
307
308 /* Estimate total channel use caused by this frame */
309
310 if (unlikely(mode->num_rates < 0))
311 return TXRX_CONTINUE;
312
313 rate = &mode->rates[0];
314 for (i = 0; i < mode->num_rates; i++) {
315 if (mode->rates[i].val == status->rate) {
316 rate = &mode->rates[i];
317 break;
318 }
319 }
320
321 /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values,
322 * 1 usec = 1/8 * (1080 / 10) = 13.5 */
323
324 if (mode->mode == MODE_IEEE80211A ||
325 (mode->mode == MODE_IEEE80211G &&
326 rate->flags & IEEE80211_RATE_ERP))
327 hdrtime = CHAN_UTIL_HDR_SHORT;
328 else
329 hdrtime = CHAN_UTIL_HDR_LONG;
330
331 load = hdrtime;
332 if (!is_multicast_ether_addr(hdr->addr1))
333 load += hdrtime;
334
335 load += skb->len * rate->rate_inv;
336
337 /* Divide channel_use by 8 to avoid wrapping around the counter */
338 load >>= CHAN_UTIL_SHIFT;
339
340 return load;
341}
342
343#ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT 291#ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
344static ieee80211_txrx_result
345ieee80211_rx_h_verify_ip_alignment(struct ieee80211_txrx_data *rx)
346{
347 int hdrlen; 292 int hdrlen;
348 293
349 if (!WLAN_FC_DATA_PRESENT(rx->fc)) 294 if (!WLAN_FC_DATA_PRESENT(rx->fc))
350 return TXRX_CONTINUE; 295 return;
351 296
352 /* 297 /*
353 * Drivers are required to align the payload data in a way that 298 * Drivers are required to align the payload data in a way that
@@ -369,83 +314,158 @@ ieee80211_rx_h_verify_ip_alignment(struct ieee80211_txrx_data *rx)
369 * to move the 802.11 header further back in that case. 314 * to move the 802.11 header further back in that case.
370 */ 315 */
371 hdrlen = ieee80211_get_hdrlen(rx->fc); 316 hdrlen = ieee80211_get_hdrlen(rx->fc);
372 if (rx->flags & IEEE80211_TXRXD_RX_AMSDU) 317 if (rx->flags & IEEE80211_RX_AMSDU)
373 hdrlen += ETH_HLEN; 318 hdrlen += ETH_HLEN;
374 WARN_ON_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3); 319 WARN_ON_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3);
375
376 return TXRX_CONTINUE;
377}
378#endif 320#endif
321}
379 322
380ieee80211_rx_handler ieee80211_rx_pre_handlers[] = 323
324static u32 ieee80211_rx_load_stats(struct ieee80211_local *local,
325 struct sk_buff *skb,
326 struct ieee80211_rx_status *status,
327 struct ieee80211_rate *rate)
381{ 328{
382 ieee80211_rx_h_parse_qos, 329 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
383#ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT 330 u32 load = 0, hdrtime;
384 ieee80211_rx_h_verify_ip_alignment, 331
385#endif 332 /* Estimate total channel use caused by this frame */
386 NULL 333
387}; 334 /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values,
335 * 1 usec = 1/8 * (1080 / 10) = 13.5 */
336
337 if (status->band == IEEE80211_BAND_5GHZ ||
338 (status->band == IEEE80211_BAND_5GHZ &&
339 rate->flags & IEEE80211_RATE_ERP_G))
340 hdrtime = CHAN_UTIL_HDR_SHORT;
341 else
342 hdrtime = CHAN_UTIL_HDR_LONG;
343
344 load = hdrtime;
345 if (!is_multicast_ether_addr(hdr->addr1))
346 load += hdrtime;
347
348 /* TODO: optimise again */
349 load += skb->len * CHAN_UTIL_RATE_LCM / rate->bitrate;
350
351 /* Divide channel_use by 8 to avoid wrapping around the counter */
352 load >>= CHAN_UTIL_SHIFT;
353
354 return load;
355}
388 356
389/* rx handlers */ 357/* rx handlers */
390 358
391static ieee80211_txrx_result 359static ieee80211_rx_result
392ieee80211_rx_h_if_stats(struct ieee80211_txrx_data *rx) 360ieee80211_rx_h_if_stats(struct ieee80211_rx_data *rx)
393{ 361{
394 if (rx->sta) 362 if (rx->sta)
395 rx->sta->channel_use_raw += rx->u.rx.load; 363 rx->sta->channel_use_raw += rx->load;
396 rx->sdata->channel_use_raw += rx->u.rx.load; 364 rx->sdata->channel_use_raw += rx->load;
397 return TXRX_CONTINUE; 365 return RX_CONTINUE;
398} 366}
399 367
400static ieee80211_txrx_result 368static ieee80211_rx_result
401ieee80211_rx_h_passive_scan(struct ieee80211_txrx_data *rx) 369ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
402{ 370{
403 struct ieee80211_local *local = rx->local; 371 struct ieee80211_local *local = rx->local;
404 struct sk_buff *skb = rx->skb; 372 struct sk_buff *skb = rx->skb;
405 373
406 if (unlikely(local->sta_hw_scanning)) 374 if (unlikely(local->sta_hw_scanning))
407 return ieee80211_sta_rx_scan(rx->dev, skb, rx->u.rx.status); 375 return ieee80211_sta_rx_scan(rx->dev, skb, rx->status);
408 376
409 if (unlikely(local->sta_sw_scanning)) { 377 if (unlikely(local->sta_sw_scanning)) {
410 /* drop all the other packets during a software scan anyway */ 378 /* drop all the other packets during a software scan anyway */
411 if (ieee80211_sta_rx_scan(rx->dev, skb, rx->u.rx.status) 379 if (ieee80211_sta_rx_scan(rx->dev, skb, rx->status)
412 != TXRX_QUEUED) 380 != RX_QUEUED)
413 dev_kfree_skb(skb); 381 dev_kfree_skb(skb);
414 return TXRX_QUEUED; 382 return RX_QUEUED;
415 } 383 }
416 384
417 if (unlikely(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) { 385 if (unlikely(rx->flags & IEEE80211_RX_IN_SCAN)) {
418 /* scanning finished during invoking of handlers */ 386 /* scanning finished during invoking of handlers */
419 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan); 387 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
420 return TXRX_DROP; 388 return RX_DROP_UNUSABLE;
421 } 389 }
422 390
423 return TXRX_CONTINUE; 391 return RX_CONTINUE;
392}
393
394static ieee80211_rx_result
395ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
396{
397 int hdrlen = ieee80211_get_hdrlen(rx->fc);
398 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
399
400#define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
401
402 if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) {
403 if (!((rx->fc & IEEE80211_FCTL_FROMDS) &&
404 (rx->fc & IEEE80211_FCTL_TODS)))
405 return RX_DROP_MONITOR;
406 if (memcmp(hdr->addr4, rx->dev->dev_addr, ETH_ALEN) == 0)
407 return RX_DROP_MONITOR;
408 }
409
410 /* If there is not an established peer link and this is not a peer link
411 * establisment frame, beacon or probe, drop the frame.
412 */
413
414 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
415 struct ieee80211_mgmt *mgmt;
416
417 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT)
418 return RX_DROP_MONITOR;
419
420 switch (rx->fc & IEEE80211_FCTL_STYPE) {
421 case IEEE80211_STYPE_ACTION:
422 mgmt = (struct ieee80211_mgmt *)hdr;
423 if (mgmt->u.action.category != PLINK_CATEGORY)
424 return RX_DROP_MONITOR;
425 /* fall through on else */
426 case IEEE80211_STYPE_PROBE_REQ:
427 case IEEE80211_STYPE_PROBE_RESP:
428 case IEEE80211_STYPE_BEACON:
429 return RX_CONTINUE;
430 break;
431 default:
432 return RX_DROP_MONITOR;
433 }
434
435 } else if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA &&
436 is_multicast_ether_addr(hdr->addr1) &&
437 mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->dev))
438 return RX_DROP_MONITOR;
439#undef msh_h_get
440
441 return RX_CONTINUE;
424} 442}
425 443
426static ieee80211_txrx_result 444
427ieee80211_rx_h_check(struct ieee80211_txrx_data *rx) 445static ieee80211_rx_result
446ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
428{ 447{
429 struct ieee80211_hdr *hdr; 448 struct ieee80211_hdr *hdr;
449
430 hdr = (struct ieee80211_hdr *) rx->skb->data; 450 hdr = (struct ieee80211_hdr *) rx->skb->data;
431 451
432 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */ 452 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
433 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) { 453 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
434 if (unlikely(rx->fc & IEEE80211_FCTL_RETRY && 454 if (unlikely(rx->fc & IEEE80211_FCTL_RETRY &&
435 rx->sta->last_seq_ctrl[rx->u.rx.queue] == 455 rx->sta->last_seq_ctrl[rx->queue] ==
436 hdr->seq_ctrl)) { 456 hdr->seq_ctrl)) {
437 if (rx->flags & IEEE80211_TXRXD_RXRA_MATCH) { 457 if (rx->flags & IEEE80211_RX_RA_MATCH) {
438 rx->local->dot11FrameDuplicateCount++; 458 rx->local->dot11FrameDuplicateCount++;
439 rx->sta->num_duplicates++; 459 rx->sta->num_duplicates++;
440 } 460 }
441 return TXRX_DROP; 461 return RX_DROP_MONITOR;
442 } else 462 } else
443 rx->sta->last_seq_ctrl[rx->u.rx.queue] = hdr->seq_ctrl; 463 rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
444 } 464 }
445 465
446 if (unlikely(rx->skb->len < 16)) { 466 if (unlikely(rx->skb->len < 16)) {
447 I802_DEBUG_INC(rx->local->rx_handlers_drop_short); 467 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
448 return TXRX_DROP; 468 return RX_DROP_MONITOR;
449 } 469 }
450 470
451 /* Drop disallowed frame classes based on STA auth/assoc state; 471 /* Drop disallowed frame classes based on STA auth/assoc state;
@@ -456,6 +476,10 @@ ieee80211_rx_h_check(struct ieee80211_txrx_data *rx)
456 * deauth/disassoc frames when needed. In addition, hostapd is 476 * deauth/disassoc frames when needed. In addition, hostapd is
457 * responsible for filtering on both auth and assoc states. 477 * responsible for filtering on both auth and assoc states.
458 */ 478 */
479
480 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
481 return ieee80211_rx_mesh_check(rx);
482
459 if (unlikely(((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA || 483 if (unlikely(((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA ||
460 ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL && 484 ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL &&
461 (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL)) && 485 (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL)) &&
@@ -464,26 +488,26 @@ ieee80211_rx_h_check(struct ieee80211_txrx_data *rx)
464 if ((!(rx->fc & IEEE80211_FCTL_FROMDS) && 488 if ((!(rx->fc & IEEE80211_FCTL_FROMDS) &&
465 !(rx->fc & IEEE80211_FCTL_TODS) && 489 !(rx->fc & IEEE80211_FCTL_TODS) &&
466 (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) 490 (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
467 || !(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) { 491 || !(rx->flags & IEEE80211_RX_RA_MATCH)) {
468 /* Drop IBSS frames and frames for other hosts 492 /* Drop IBSS frames and frames for other hosts
469 * silently. */ 493 * silently. */
470 return TXRX_DROP; 494 return RX_DROP_MONITOR;
471 } 495 }
472 496
473 return TXRX_DROP; 497 return RX_DROP_MONITOR;
474 } 498 }
475 499
476 return TXRX_CONTINUE; 500 return RX_CONTINUE;
477} 501}
478 502
479 503
480static ieee80211_txrx_result 504static ieee80211_rx_result
481ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx) 505ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
482{ 506{
483 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 507 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
484 int keyidx; 508 int keyidx;
485 int hdrlen; 509 int hdrlen;
486 ieee80211_txrx_result result = TXRX_DROP; 510 ieee80211_rx_result result = RX_DROP_UNUSABLE;
487 struct ieee80211_key *stakey = NULL; 511 struct ieee80211_key *stakey = NULL;
488 512
489 /* 513 /*
@@ -513,14 +537,14 @@ ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx)
513 */ 537 */
514 538
515 if (!(rx->fc & IEEE80211_FCTL_PROTECTED)) 539 if (!(rx->fc & IEEE80211_FCTL_PROTECTED))
516 return TXRX_CONTINUE; 540 return RX_CONTINUE;
517 541
518 /* 542 /*
519 * No point in finding a key and decrypting if the frame is neither 543 * No point in finding a key and decrypting if the frame is neither
520 * addressed to us nor a multicast frame. 544 * addressed to us nor a multicast frame.
521 */ 545 */
522 if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) 546 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
523 return TXRX_CONTINUE; 547 return RX_CONTINUE;
524 548
525 if (rx->sta) 549 if (rx->sta)
526 stakey = rcu_dereference(rx->sta->key); 550 stakey = rcu_dereference(rx->sta->key);
@@ -537,14 +561,14 @@ ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx)
537 * we somehow allow the driver to tell us which key 561 * we somehow allow the driver to tell us which key
538 * the hardware used if this flag is set? 562 * the hardware used if this flag is set?
539 */ 563 */
540 if ((rx->u.rx.status->flag & RX_FLAG_DECRYPTED) && 564 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
541 (rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED)) 565 (rx->status->flag & RX_FLAG_IV_STRIPPED))
542 return TXRX_CONTINUE; 566 return RX_CONTINUE;
543 567
544 hdrlen = ieee80211_get_hdrlen(rx->fc); 568 hdrlen = ieee80211_get_hdrlen(rx->fc);
545 569
546 if (rx->skb->len < 8 + hdrlen) 570 if (rx->skb->len < 8 + hdrlen)
547 return TXRX_DROP; /* TODO: count this? */ 571 return RX_DROP_UNUSABLE; /* TODO: count this? */
548 572
549 /* 573 /*
550 * no need to call ieee80211_wep_get_keyidx, 574 * no need to call ieee80211_wep_get_keyidx,
@@ -573,14 +597,14 @@ ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx)
573 printk(KERN_DEBUG "%s: RX protected frame," 597 printk(KERN_DEBUG "%s: RX protected frame,"
574 " but have no key\n", rx->dev->name); 598 " but have no key\n", rx->dev->name);
575#endif /* CONFIG_MAC80211_DEBUG */ 599#endif /* CONFIG_MAC80211_DEBUG */
576 return TXRX_DROP; 600 return RX_DROP_MONITOR;
577 } 601 }
578 602
579 /* Check for weak IVs if possible */ 603 /* Check for weak IVs if possible */
580 if (rx->sta && rx->key->conf.alg == ALG_WEP && 604 if (rx->sta && rx->key->conf.alg == ALG_WEP &&
581 ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && 605 ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
582 (!(rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED) || 606 (!(rx->status->flag & RX_FLAG_IV_STRIPPED) ||
583 !(rx->u.rx.status->flag & RX_FLAG_DECRYPTED)) && 607 !(rx->status->flag & RX_FLAG_DECRYPTED)) &&
584 ieee80211_wep_is_weak_iv(rx->skb, rx->key)) 608 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
585 rx->sta->wep_weak_iv_count++; 609 rx->sta->wep_weak_iv_count++;
586 610
@@ -597,7 +621,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx)
597 } 621 }
598 622
599 /* either the frame has been decrypted or will be dropped */ 623 /* either the frame has been decrypted or will be dropped */
600 rx->u.rx.status->flag |= RX_FLAG_DECRYPTED; 624 rx->status->flag |= RX_FLAG_DECRYPTED;
601 625
602 return result; 626 return result;
603} 627}
@@ -607,12 +631,12 @@ static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta)
607 struct ieee80211_sub_if_data *sdata; 631 struct ieee80211_sub_if_data *sdata;
608 DECLARE_MAC_BUF(mac); 632 DECLARE_MAC_BUF(mac);
609 633
610 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); 634 sdata = sta->sdata;
611 635
612 if (sdata->bss) 636 if (sdata->bss)
613 atomic_inc(&sdata->bss->num_sta_ps); 637 atomic_inc(&sdata->bss->num_sta_ps);
614 sta->flags |= WLAN_STA_PS; 638 sta->flags |= WLAN_STA_PS;
615 sta->pspoll = 0; 639 sta->flags &= ~WLAN_STA_PSPOLL;
616#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 640#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
617 printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n", 641 printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n",
618 dev->name, print_mac(mac, sta->addr), sta->aid); 642 dev->name, print_mac(mac, sta->addr), sta->aid);
@@ -628,21 +652,21 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
628 struct ieee80211_tx_packet_data *pkt_data; 652 struct ieee80211_tx_packet_data *pkt_data;
629 DECLARE_MAC_BUF(mac); 653 DECLARE_MAC_BUF(mac);
630 654
631 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); 655 sdata = sta->sdata;
656
632 if (sdata->bss) 657 if (sdata->bss)
633 atomic_dec(&sdata->bss->num_sta_ps); 658 atomic_dec(&sdata->bss->num_sta_ps);
634 sta->flags &= ~(WLAN_STA_PS | WLAN_STA_TIM); 659
635 sta->pspoll = 0; 660 sta->flags &= ~(WLAN_STA_PS | WLAN_STA_PSPOLL);
636 if (!skb_queue_empty(&sta->ps_tx_buf)) { 661
637 if (local->ops->set_tim) 662 if (!skb_queue_empty(&sta->ps_tx_buf))
638 local->ops->set_tim(local_to_hw(local), sta->aid, 0); 663 sta_info_clear_tim_bit(sta);
639 if (sdata->bss) 664
640 bss_tim_clear(local, sdata->bss, sta->aid);
641 }
642#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 665#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
643 printk(KERN_DEBUG "%s: STA %s aid %d exits power save mode\n", 666 printk(KERN_DEBUG "%s: STA %s aid %d exits power save mode\n",
644 dev->name, print_mac(mac, sta->addr), sta->aid); 667 dev->name, print_mac(mac, sta->addr), sta->aid);
645#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 668#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
669
646 /* Send all buffered frames to the station */ 670 /* Send all buffered frames to the station */
647 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { 671 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) {
648 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; 672 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb;
@@ -666,15 +690,15 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
666 return sent; 690 return sent;
667} 691}
668 692
669static ieee80211_txrx_result 693static ieee80211_rx_result
670ieee80211_rx_h_sta_process(struct ieee80211_txrx_data *rx) 694ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
671{ 695{
672 struct sta_info *sta = rx->sta; 696 struct sta_info *sta = rx->sta;
673 struct net_device *dev = rx->dev; 697 struct net_device *dev = rx->dev;
674 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 698 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
675 699
676 if (!sta) 700 if (!sta)
677 return TXRX_CONTINUE; 701 return RX_CONTINUE;
678 702
679 /* Update last_rx only for IBSS packets which are for the current 703 /* Update last_rx only for IBSS packets which are for the current
680 * BSSID to avoid keeping the current IBSS network alive in cases where 704 * BSSID to avoid keeping the current IBSS network alive in cases where
@@ -690,24 +714,26 @@ ieee80211_rx_h_sta_process(struct ieee80211_txrx_data *rx)
690 /* Update last_rx only for unicast frames in order to prevent 714 /* Update last_rx only for unicast frames in order to prevent
691 * the Probe Request frames (the only broadcast frames from a 715 * the Probe Request frames (the only broadcast frames from a
692 * STA in infrastructure mode) from keeping a connection alive. 716 * STA in infrastructure mode) from keeping a connection alive.
717 * Mesh beacons will update last_rx when if they are found to
718 * match the current local configuration when processed.
693 */ 719 */
694 sta->last_rx = jiffies; 720 sta->last_rx = jiffies;
695 } 721 }
696 722
697 if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) 723 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
698 return TXRX_CONTINUE; 724 return RX_CONTINUE;
699 725
700 sta->rx_fragments++; 726 sta->rx_fragments++;
701 sta->rx_bytes += rx->skb->len; 727 sta->rx_bytes += rx->skb->len;
702 sta->last_rssi = rx->u.rx.status->ssi; 728 sta->last_rssi = rx->status->ssi;
703 sta->last_signal = rx->u.rx.status->signal; 729 sta->last_signal = rx->status->signal;
704 sta->last_noise = rx->u.rx.status->noise; 730 sta->last_noise = rx->status->noise;
705 731
706 if (!(rx->fc & IEEE80211_FCTL_MOREFRAGS)) { 732 if (!(rx->fc & IEEE80211_FCTL_MOREFRAGS)) {
707 /* Change STA power saving mode only in the end of a frame 733 /* Change STA power saving mode only in the end of a frame
708 * exchange sequence */ 734 * exchange sequence */
709 if ((sta->flags & WLAN_STA_PS) && !(rx->fc & IEEE80211_FCTL_PM)) 735 if ((sta->flags & WLAN_STA_PS) && !(rx->fc & IEEE80211_FCTL_PM))
710 rx->u.rx.sent_ps_buffered += ap_sta_ps_end(dev, sta); 736 rx->sent_ps_buffered += ap_sta_ps_end(dev, sta);
711 else if (!(sta->flags & WLAN_STA_PS) && 737 else if (!(sta->flags & WLAN_STA_PS) &&
712 (rx->fc & IEEE80211_FCTL_PM)) 738 (rx->fc & IEEE80211_FCTL_PM))
713 ap_sta_ps_start(dev, sta); 739 ap_sta_ps_start(dev, sta);
@@ -722,10 +748,10 @@ ieee80211_rx_h_sta_process(struct ieee80211_txrx_data *rx)
722 * as a dropped packed. */ 748 * as a dropped packed. */
723 sta->rx_packets++; 749 sta->rx_packets++;
724 dev_kfree_skb(rx->skb); 750 dev_kfree_skb(rx->skb);
725 return TXRX_QUEUED; 751 return RX_QUEUED;
726 } 752 }
727 753
728 return TXRX_CONTINUE; 754 return RX_CONTINUE;
729} /* ieee80211_rx_h_sta_process */ 755} /* ieee80211_rx_h_sta_process */
730 756
731static inline struct ieee80211_fragment_entry * 757static inline struct ieee80211_fragment_entry *
@@ -801,7 +827,7 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
801 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) 827 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
802 continue; 828 continue;
803 829
804 if (entry->first_frag_time + 2 * HZ < jiffies) { 830 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
805 __skb_queue_purge(&entry->skb_list); 831 __skb_queue_purge(&entry->skb_list);
806 continue; 832 continue;
807 } 833 }
@@ -811,8 +837,8 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
811 return NULL; 837 return NULL;
812} 838}
813 839
814static ieee80211_txrx_result 840static ieee80211_rx_result
815ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx) 841ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
816{ 842{
817 struct ieee80211_hdr *hdr; 843 struct ieee80211_hdr *hdr;
818 u16 sc; 844 u16 sc;
@@ -838,27 +864,27 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx)
838 if (frag == 0) { 864 if (frag == 0) {
839 /* This is the first fragment of a new frame. */ 865 /* This is the first fragment of a new frame. */
840 entry = ieee80211_reassemble_add(rx->sdata, frag, seq, 866 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
841 rx->u.rx.queue, &(rx->skb)); 867 rx->queue, &(rx->skb));
842 if (rx->key && rx->key->conf.alg == ALG_CCMP && 868 if (rx->key && rx->key->conf.alg == ALG_CCMP &&
843 (rx->fc & IEEE80211_FCTL_PROTECTED)) { 869 (rx->fc & IEEE80211_FCTL_PROTECTED)) {
844 /* Store CCMP PN so that we can verify that the next 870 /* Store CCMP PN so that we can verify that the next
845 * fragment has a sequential PN value. */ 871 * fragment has a sequential PN value. */
846 entry->ccmp = 1; 872 entry->ccmp = 1;
847 memcpy(entry->last_pn, 873 memcpy(entry->last_pn,
848 rx->key->u.ccmp.rx_pn[rx->u.rx.queue], 874 rx->key->u.ccmp.rx_pn[rx->queue],
849 CCMP_PN_LEN); 875 CCMP_PN_LEN);
850 } 876 }
851 return TXRX_QUEUED; 877 return RX_QUEUED;
852 } 878 }
853 879
854 /* This is a fragment for a frame that should already be pending in 880 /* This is a fragment for a frame that should already be pending in
855 * fragment cache. Add this fragment to the end of the pending entry. 881 * fragment cache. Add this fragment to the end of the pending entry.
856 */ 882 */
857 entry = ieee80211_reassemble_find(rx->sdata, rx->fc, frag, seq, 883 entry = ieee80211_reassemble_find(rx->sdata, rx->fc, frag, seq,
858 rx->u.rx.queue, hdr); 884 rx->queue, hdr);
859 if (!entry) { 885 if (!entry) {
860 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 886 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
861 return TXRX_DROP; 887 return RX_DROP_MONITOR;
862 } 888 }
863 889
864 /* Verify that MPDUs within one MSDU have sequential PN values. 890 /* Verify that MPDUs within one MSDU have sequential PN values.
@@ -867,14 +893,14 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx)
867 int i; 893 int i;
868 u8 pn[CCMP_PN_LEN], *rpn; 894 u8 pn[CCMP_PN_LEN], *rpn;
869 if (!rx->key || rx->key->conf.alg != ALG_CCMP) 895 if (!rx->key || rx->key->conf.alg != ALG_CCMP)
870 return TXRX_DROP; 896 return RX_DROP_UNUSABLE;
871 memcpy(pn, entry->last_pn, CCMP_PN_LEN); 897 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
872 for (i = CCMP_PN_LEN - 1; i >= 0; i--) { 898 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
873 pn[i]++; 899 pn[i]++;
874 if (pn[i]) 900 if (pn[i])
875 break; 901 break;
876 } 902 }
877 rpn = rx->key->u.ccmp.rx_pn[rx->u.rx.queue]; 903 rpn = rx->key->u.ccmp.rx_pn[rx->queue];
878 if (memcmp(pn, rpn, CCMP_PN_LEN) != 0) { 904 if (memcmp(pn, rpn, CCMP_PN_LEN) != 0) {
879 if (net_ratelimit()) 905 if (net_ratelimit())
880 printk(KERN_DEBUG "%s: defrag: CCMP PN not " 906 printk(KERN_DEBUG "%s: defrag: CCMP PN not "
@@ -885,7 +911,7 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx)
885 rpn[0], rpn[1], rpn[2], rpn[3], rpn[4], 911 rpn[0], rpn[1], rpn[2], rpn[3], rpn[4],
886 rpn[5], pn[0], pn[1], pn[2], pn[3], 912 rpn[5], pn[0], pn[1], pn[2], pn[3],
887 pn[4], pn[5]); 913 pn[4], pn[5]);
888 return TXRX_DROP; 914 return RX_DROP_UNUSABLE;
889 } 915 }
890 memcpy(entry->last_pn, pn, CCMP_PN_LEN); 916 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
891 } 917 }
@@ -896,7 +922,7 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx)
896 entry->extra_len += rx->skb->len; 922 entry->extra_len += rx->skb->len;
897 if (rx->fc & IEEE80211_FCTL_MOREFRAGS) { 923 if (rx->fc & IEEE80211_FCTL_MOREFRAGS) {
898 rx->skb = NULL; 924 rx->skb = NULL;
899 return TXRX_QUEUED; 925 return RX_QUEUED;
900 } 926 }
901 927
902 rx->skb = __skb_dequeue(&entry->skb_list); 928 rx->skb = __skb_dequeue(&entry->skb_list);
@@ -906,7 +932,7 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx)
906 GFP_ATOMIC))) { 932 GFP_ATOMIC))) {
907 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 933 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
908 __skb_queue_purge(&entry->skb_list); 934 __skb_queue_purge(&entry->skb_list);
909 return TXRX_DROP; 935 return RX_DROP_UNUSABLE;
910 } 936 }
911 } 937 }
912 while ((skb = __skb_dequeue(&entry->skb_list))) { 938 while ((skb = __skb_dequeue(&entry->skb_list))) {
@@ -915,7 +941,7 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx)
915 } 941 }
916 942
917 /* Complete frame has been reassembled - process it now */ 943 /* Complete frame has been reassembled - process it now */
918 rx->flags |= IEEE80211_TXRXD_FRAGMENTED; 944 rx->flags |= IEEE80211_RX_FRAGMENTED;
919 945
920 out: 946 out:
921 if (rx->sta) 947 if (rx->sta)
@@ -924,11 +950,11 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx)
924 rx->local->dot11MulticastReceivedFrameCount++; 950 rx->local->dot11MulticastReceivedFrameCount++;
925 else 951 else
926 ieee80211_led_rx(rx->local); 952 ieee80211_led_rx(rx->local);
927 return TXRX_CONTINUE; 953 return RX_CONTINUE;
928} 954}
929 955
930static ieee80211_txrx_result 956static ieee80211_rx_result
931ieee80211_rx_h_ps_poll(struct ieee80211_txrx_data *rx) 957ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
932{ 958{
933 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); 959 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
934 struct sk_buff *skb; 960 struct sk_buff *skb;
@@ -938,12 +964,12 @@ ieee80211_rx_h_ps_poll(struct ieee80211_txrx_data *rx)
938 if (likely(!rx->sta || 964 if (likely(!rx->sta ||
939 (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL || 965 (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL ||
940 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PSPOLL || 966 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PSPOLL ||
941 !(rx->flags & IEEE80211_TXRXD_RXRA_MATCH))) 967 !(rx->flags & IEEE80211_RX_RA_MATCH)))
942 return TXRX_CONTINUE; 968 return RX_CONTINUE;
943 969
944 if ((sdata->vif.type != IEEE80211_IF_TYPE_AP) && 970 if ((sdata->vif.type != IEEE80211_IF_TYPE_AP) &&
945 (sdata->vif.type != IEEE80211_IF_TYPE_VLAN)) 971 (sdata->vif.type != IEEE80211_IF_TYPE_VLAN))
946 return TXRX_DROP; 972 return RX_DROP_UNUSABLE;
947 973
948 skb = skb_dequeue(&rx->sta->tx_filtered); 974 skb = skb_dequeue(&rx->sta->tx_filtered);
949 if (!skb) { 975 if (!skb) {
@@ -958,9 +984,11 @@ ieee80211_rx_h_ps_poll(struct ieee80211_txrx_data *rx)
958 struct ieee80211_hdr *hdr = 984 struct ieee80211_hdr *hdr =
959 (struct ieee80211_hdr *) skb->data; 985 (struct ieee80211_hdr *) skb->data;
960 986
961 /* tell TX path to send one frame even though the STA may 987 /*
962 * still remain is PS mode after this frame exchange */ 988 * Tell TX path to send one frame even though the STA may
963 rx->sta->pspoll = 1; 989 * still remain is PS mode after this frame exchange.
990 */
991 rx->sta->flags |= WLAN_STA_PSPOLL;
964 992
965#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 993#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
966 printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n", 994 printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n",
@@ -970,46 +998,45 @@ ieee80211_rx_h_ps_poll(struct ieee80211_txrx_data *rx)
970 998
971 /* Use MoreData flag to indicate whether there are more 999 /* Use MoreData flag to indicate whether there are more
972 * buffered frames for this STA */ 1000 * buffered frames for this STA */
973 if (no_pending_pkts) { 1001 if (no_pending_pkts)
974 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA); 1002 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
975 rx->sta->flags &= ~WLAN_STA_TIM; 1003 else
976 } else
977 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); 1004 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
978 1005
979 dev_queue_xmit(skb); 1006 dev_queue_xmit(skb);
980 1007
981 if (no_pending_pkts) { 1008 if (no_pending_pkts)
982 if (rx->local->ops->set_tim) 1009 sta_info_clear_tim_bit(rx->sta);
983 rx->local->ops->set_tim(local_to_hw(rx->local),
984 rx->sta->aid, 0);
985 if (rx->sdata->bss)
986 bss_tim_clear(rx->local, rx->sdata->bss, rx->sta->aid);
987 }
988#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1010#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
989 } else if (!rx->u.rx.sent_ps_buffered) { 1011 } else if (!rx->sent_ps_buffered) {
1012 /*
1013 * FIXME: This can be the result of a race condition between
1014 * us expiring a frame and the station polling for it.
1015 * Should we send it a null-func frame indicating we
1016 * have nothing buffered for it?
1017 */
990 printk(KERN_DEBUG "%s: STA %s sent PS Poll even " 1018 printk(KERN_DEBUG "%s: STA %s sent PS Poll even "
991 "though there is no buffered frames for it\n", 1019 "though there is no buffered frames for it\n",
992 rx->dev->name, print_mac(mac, rx->sta->addr)); 1020 rx->dev->name, print_mac(mac, rx->sta->addr));
993#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1021#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
994
995 } 1022 }
996 1023
997 /* Free PS Poll skb here instead of returning TXRX_DROP that would 1024 /* Free PS Poll skb here instead of returning RX_DROP that would
998 * count as an dropped frame. */ 1025 * count as an dropped frame. */
999 dev_kfree_skb(rx->skb); 1026 dev_kfree_skb(rx->skb);
1000 1027
1001 return TXRX_QUEUED; 1028 return RX_QUEUED;
1002} 1029}
1003 1030
1004static ieee80211_txrx_result 1031static ieee80211_rx_result
1005ieee80211_rx_h_remove_qos_control(struct ieee80211_txrx_data *rx) 1032ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1006{ 1033{
1007 u16 fc = rx->fc; 1034 u16 fc = rx->fc;
1008 u8 *data = rx->skb->data; 1035 u8 *data = rx->skb->data;
1009 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) data; 1036 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) data;
1010 1037
1011 if (!WLAN_FC_IS_QOS_DATA(fc)) 1038 if (!WLAN_FC_IS_QOS_DATA(fc))
1012 return TXRX_CONTINUE; 1039 return RX_CONTINUE;
1013 1040
1014 /* remove the qos control field, update frame type and meta-data */ 1041 /* remove the qos control field, update frame type and meta-data */
1015 memmove(data + 2, data, ieee80211_get_hdrlen(fc) - 2); 1042 memmove(data + 2, data, ieee80211_get_hdrlen(fc) - 2);
@@ -1018,17 +1045,17 @@ ieee80211_rx_h_remove_qos_control(struct ieee80211_txrx_data *rx)
1018 rx->fc = fc &= ~IEEE80211_STYPE_QOS_DATA; 1045 rx->fc = fc &= ~IEEE80211_STYPE_QOS_DATA;
1019 hdr->frame_control = cpu_to_le16(fc); 1046 hdr->frame_control = cpu_to_le16(fc);
1020 1047
1021 return TXRX_CONTINUE; 1048 return RX_CONTINUE;
1022} 1049}
1023 1050
1024static int 1051static int
1025ieee80211_802_1x_port_control(struct ieee80211_txrx_data *rx) 1052ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1026{ 1053{
1027 if (unlikely(rx->sdata->ieee802_1x_pac && 1054 if (unlikely(!rx->sta || !(rx->sta->flags & WLAN_STA_AUTHORIZED))) {
1028 (!rx->sta || !(rx->sta->flags & WLAN_STA_AUTHORIZED)))) {
1029#ifdef CONFIG_MAC80211_DEBUG 1055#ifdef CONFIG_MAC80211_DEBUG
1030 printk(KERN_DEBUG "%s: dropped frame " 1056 if (net_ratelimit())
1031 "(unauthorized port)\n", rx->dev->name); 1057 printk(KERN_DEBUG "%s: dropped frame "
1058 "(unauthorized port)\n", rx->dev->name);
1032#endif /* CONFIG_MAC80211_DEBUG */ 1059#endif /* CONFIG_MAC80211_DEBUG */
1033 return -EACCES; 1060 return -EACCES;
1034 } 1061 }
@@ -1037,13 +1064,13 @@ ieee80211_802_1x_port_control(struct ieee80211_txrx_data *rx)
1037} 1064}
1038 1065
1039static int 1066static int
1040ieee80211_drop_unencrypted(struct ieee80211_txrx_data *rx) 1067ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx)
1041{ 1068{
1042 /* 1069 /*
1043 * Pass through unencrypted frames if the hardware has 1070 * Pass through unencrypted frames if the hardware has
1044 * decrypted them already. 1071 * decrypted them already.
1045 */ 1072 */
1046 if (rx->u.rx.status->flag & RX_FLAG_DECRYPTED) 1073 if (rx->status->flag & RX_FLAG_DECRYPTED)
1047 return 0; 1074 return 0;
1048 1075
1049 /* Drop unencrypted frames if key is set. */ 1076 /* Drop unencrypted frames if key is set. */
@@ -1057,7 +1084,7 @@ ieee80211_drop_unencrypted(struct ieee80211_txrx_data *rx)
1057} 1084}
1058 1085
1059static int 1086static int
1060ieee80211_data_to_8023(struct ieee80211_txrx_data *rx) 1087ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1061{ 1088{
1062 struct net_device *dev = rx->dev; 1089 struct net_device *dev = rx->dev;
1063 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 1090 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
@@ -1079,6 +1106,21 @@ ieee80211_data_to_8023(struct ieee80211_txrx_data *rx)
1079 1106
1080 hdrlen = ieee80211_get_hdrlen(fc); 1107 hdrlen = ieee80211_get_hdrlen(fc);
1081 1108
1109 if (ieee80211_vif_is_mesh(&sdata->vif)) {
1110 int meshhdrlen = ieee80211_get_mesh_hdrlen(
1111 (struct ieee80211s_hdr *) (skb->data + hdrlen));
1112 /* Copy on cb:
1113 * - mesh header: to be used for mesh forwarding
1114 * decision. It will also be used as mesh header template at
1115 * tx.c:ieee80211_subif_start_xmit() if interface
1116 * type is mesh and skb->pkt_type == PACKET_OTHERHOST
1117 * - ta: to be used if a RERR needs to be sent.
1118 */
1119 memcpy(skb->cb, skb->data + hdrlen, meshhdrlen);
1120 memcpy(MESH_PREQ(skb), hdr->addr2, ETH_ALEN);
1121 hdrlen += meshhdrlen;
1122 }
1123
1082 /* convert IEEE 802.11 header + possible LLC headers into Ethernet 1124 /* convert IEEE 802.11 header + possible LLC headers into Ethernet
1083 * header 1125 * header
1084 * IEEE 802.11 address fields: 1126 * IEEE 802.11 address fields:
@@ -1112,9 +1154,10 @@ ieee80211_data_to_8023(struct ieee80211_txrx_data *rx)
1112 memcpy(dst, hdr->addr3, ETH_ALEN); 1154 memcpy(dst, hdr->addr3, ETH_ALEN);
1113 memcpy(src, hdr->addr4, ETH_ALEN); 1155 memcpy(src, hdr->addr4, ETH_ALEN);
1114 1156
1115 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS)) { 1157 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS &&
1116 if (net_ratelimit()) 1158 sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)) {
1117 printk(KERN_DEBUG "%s: dropped FromDS&ToDS " 1159 if (net_ratelimit())
1160 printk(KERN_DEBUG "%s: dropped FromDS&ToDS "
1118 "frame (RA=%s TA=%s DA=%s SA=%s)\n", 1161 "frame (RA=%s TA=%s DA=%s SA=%s)\n",
1119 rx->dev->name, 1162 rx->dev->name,
1120 print_mac(mac, hdr->addr1), 1163 print_mac(mac, hdr->addr1),
@@ -1189,7 +1232,7 @@ ieee80211_data_to_8023(struct ieee80211_txrx_data *rx)
1189/* 1232/*
1190 * requires that rx->skb is a frame with ethernet header 1233 * requires that rx->skb is a frame with ethernet header
1191 */ 1234 */
1192static bool ieee80211_frame_allowed(struct ieee80211_txrx_data *rx) 1235static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx)
1193{ 1236{
1194 static const u8 pae_group_addr[ETH_ALEN] 1237 static const u8 pae_group_addr[ETH_ALEN]
1195 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 1238 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
@@ -1215,7 +1258,7 @@ static bool ieee80211_frame_allowed(struct ieee80211_txrx_data *rx)
1215 * requires that rx->skb is a frame with ethernet header 1258 * requires that rx->skb is a frame with ethernet header
1216 */ 1259 */
1217static void 1260static void
1218ieee80211_deliver_skb(struct ieee80211_txrx_data *rx) 1261ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1219{ 1262{
1220 struct net_device *dev = rx->dev; 1263 struct net_device *dev = rx->dev;
1221 struct ieee80211_local *local = rx->local; 1264 struct ieee80211_local *local = rx->local;
@@ -1229,7 +1272,7 @@ ieee80211_deliver_skb(struct ieee80211_txrx_data *rx)
1229 1272
1230 if (local->bridge_packets && (sdata->vif.type == IEEE80211_IF_TYPE_AP || 1273 if (local->bridge_packets && (sdata->vif.type == IEEE80211_IF_TYPE_AP ||
1231 sdata->vif.type == IEEE80211_IF_TYPE_VLAN) && 1274 sdata->vif.type == IEEE80211_IF_TYPE_VLAN) &&
1232 (rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) { 1275 (rx->flags & IEEE80211_RX_RA_MATCH)) {
1233 if (is_multicast_ether_addr(ehdr->h_dest)) { 1276 if (is_multicast_ether_addr(ehdr->h_dest)) {
1234 /* 1277 /*
1235 * send multicast frames both to higher layers in 1278 * send multicast frames both to higher layers in
@@ -1241,7 +1284,7 @@ ieee80211_deliver_skb(struct ieee80211_txrx_data *rx)
1241 "multicast frame\n", dev->name); 1284 "multicast frame\n", dev->name);
1242 } else { 1285 } else {
1243 dsta = sta_info_get(local, skb->data); 1286 dsta = sta_info_get(local, skb->data);
1244 if (dsta && dsta->dev == dev) { 1287 if (dsta && dsta->sdata->dev == dev) {
1245 /* 1288 /*
1246 * The destination station is associated to 1289 * The destination station is associated to
1247 * this AP (in this VLAN), so send the frame 1290 * this AP (in this VLAN), so send the frame
@@ -1251,8 +1294,38 @@ ieee80211_deliver_skb(struct ieee80211_txrx_data *rx)
1251 xmit_skb = skb; 1294 xmit_skb = skb;
1252 skb = NULL; 1295 skb = NULL;
1253 } 1296 }
1254 if (dsta) 1297 }
1255 sta_info_put(dsta); 1298 }
1299
1300 /* Mesh forwarding */
1301 if (ieee80211_vif_is_mesh(&sdata->vif)) {
1302 u8 *mesh_ttl = &((struct ieee80211s_hdr *)skb->cb)->ttl;
1303 (*mesh_ttl)--;
1304
1305 if (is_multicast_ether_addr(skb->data)) {
1306 if (*mesh_ttl > 0) {
1307 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1308 if (!xmit_skb && net_ratelimit())
1309 printk(KERN_DEBUG "%s: failed to clone "
1310 "multicast frame\n", dev->name);
1311 else
1312 xmit_skb->pkt_type = PACKET_OTHERHOST;
1313 } else
1314 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.sta,
1315 dropped_frames_ttl);
1316 } else if (skb->pkt_type != PACKET_OTHERHOST &&
1317 compare_ether_addr(dev->dev_addr, skb->data) != 0) {
1318 if (*mesh_ttl == 0) {
1319 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.sta,
1320 dropped_frames_ttl);
1321 dev_kfree_skb(skb);
1322 skb = NULL;
1323 } else {
1324 xmit_skb = skb;
1325 xmit_skb->pkt_type = PACKET_OTHERHOST;
1326 if (!(dev->flags & IFF_PROMISC))
1327 skb = NULL;
1328 }
1256 } 1329 }
1257 } 1330 }
1258 1331
@@ -1272,8 +1345,8 @@ ieee80211_deliver_skb(struct ieee80211_txrx_data *rx)
1272 } 1345 }
1273} 1346}
1274 1347
1275static ieee80211_txrx_result 1348static ieee80211_rx_result
1276ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx) 1349ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1277{ 1350{
1278 struct net_device *dev = rx->dev; 1351 struct net_device *dev = rx->dev;
1279 struct ieee80211_local *local = rx->local; 1352 struct ieee80211_local *local = rx->local;
@@ -1288,17 +1361,17 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx)
1288 1361
1289 fc = rx->fc; 1362 fc = rx->fc;
1290 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) 1363 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA))
1291 return TXRX_CONTINUE; 1364 return RX_CONTINUE;
1292 1365
1293 if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) 1366 if (unlikely(!WLAN_FC_DATA_PRESENT(fc)))
1294 return TXRX_DROP; 1367 return RX_DROP_MONITOR;
1295 1368
1296 if (!(rx->flags & IEEE80211_TXRXD_RX_AMSDU)) 1369 if (!(rx->flags & IEEE80211_RX_AMSDU))
1297 return TXRX_CONTINUE; 1370 return RX_CONTINUE;
1298 1371
1299 err = ieee80211_data_to_8023(rx); 1372 err = ieee80211_data_to_8023(rx);
1300 if (unlikely(err)) 1373 if (unlikely(err))
1301 return TXRX_DROP; 1374 return RX_DROP_UNUSABLE;
1302 1375
1303 skb->dev = dev; 1376 skb->dev = dev;
1304 1377
@@ -1308,7 +1381,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx)
1308 /* skip the wrapping header */ 1381 /* skip the wrapping header */
1309 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr)); 1382 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
1310 if (!eth) 1383 if (!eth)
1311 return TXRX_DROP; 1384 return RX_DROP_UNUSABLE;
1312 1385
1313 while (skb != frame) { 1386 while (skb != frame) {
1314 u8 padding; 1387 u8 padding;
@@ -1323,7 +1396,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx)
1323 /* the last MSDU has no padding */ 1396 /* the last MSDU has no padding */
1324 if (subframe_len > remaining) { 1397 if (subframe_len > remaining) {
1325 printk(KERN_DEBUG "%s: wrong buffer size", dev->name); 1398 printk(KERN_DEBUG "%s: wrong buffer size", dev->name);
1326 return TXRX_DROP; 1399 return RX_DROP_UNUSABLE;
1327 } 1400 }
1328 1401
1329 skb_pull(skb, sizeof(struct ethhdr)); 1402 skb_pull(skb, sizeof(struct ethhdr));
@@ -1335,7 +1408,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx)
1335 subframe_len); 1408 subframe_len);
1336 1409
1337 if (frame == NULL) 1410 if (frame == NULL)
1338 return TXRX_DROP; 1411 return RX_DROP_UNUSABLE;
1339 1412
1340 skb_reserve(frame, local->hw.extra_tx_headroom + 1413 skb_reserve(frame, local->hw.extra_tx_headroom +
1341 sizeof(struct ethhdr)); 1414 sizeof(struct ethhdr));
@@ -1348,7 +1421,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx)
1348 printk(KERN_DEBUG "%s: wrong buffer size ", 1421 printk(KERN_DEBUG "%s: wrong buffer size ",
1349 dev->name); 1422 dev->name);
1350 dev_kfree_skb(frame); 1423 dev_kfree_skb(frame);
1351 return TXRX_DROP; 1424 return RX_DROP_UNUSABLE;
1352 } 1425 }
1353 } 1426 }
1354 1427
@@ -1378,7 +1451,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx)
1378 1451
1379 if (!ieee80211_frame_allowed(rx)) { 1452 if (!ieee80211_frame_allowed(rx)) {
1380 if (skb == frame) /* last frame */ 1453 if (skb == frame) /* last frame */
1381 return TXRX_DROP; 1454 return RX_DROP_UNUSABLE;
1382 dev_kfree_skb(frame); 1455 dev_kfree_skb(frame);
1383 continue; 1456 continue;
1384 } 1457 }
@@ -1386,11 +1459,11 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx)
1386 ieee80211_deliver_skb(rx); 1459 ieee80211_deliver_skb(rx);
1387 } 1460 }
1388 1461
1389 return TXRX_QUEUED; 1462 return RX_QUEUED;
1390} 1463}
1391 1464
1392static ieee80211_txrx_result 1465static ieee80211_rx_result
1393ieee80211_rx_h_data(struct ieee80211_txrx_data *rx) 1466ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1394{ 1467{
1395 struct net_device *dev = rx->dev; 1468 struct net_device *dev = rx->dev;
1396 u16 fc; 1469 u16 fc;
@@ -1398,17 +1471,17 @@ ieee80211_rx_h_data(struct ieee80211_txrx_data *rx)
1398 1471
1399 fc = rx->fc; 1472 fc = rx->fc;
1400 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) 1473 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA))
1401 return TXRX_CONTINUE; 1474 return RX_CONTINUE;
1402 1475
1403 if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) 1476 if (unlikely(!WLAN_FC_DATA_PRESENT(fc)))
1404 return TXRX_DROP; 1477 return RX_DROP_MONITOR;
1405 1478
1406 err = ieee80211_data_to_8023(rx); 1479 err = ieee80211_data_to_8023(rx);
1407 if (unlikely(err)) 1480 if (unlikely(err))
1408 return TXRX_DROP; 1481 return RX_DROP_UNUSABLE;
1409 1482
1410 if (!ieee80211_frame_allowed(rx)) 1483 if (!ieee80211_frame_allowed(rx))
1411 return TXRX_DROP; 1484 return RX_DROP_MONITOR;
1412 1485
1413 rx->skb->dev = dev; 1486 rx->skb->dev = dev;
1414 1487
@@ -1417,11 +1490,11 @@ ieee80211_rx_h_data(struct ieee80211_txrx_data *rx)
1417 1490
1418 ieee80211_deliver_skb(rx); 1491 ieee80211_deliver_skb(rx);
1419 1492
1420 return TXRX_QUEUED; 1493 return RX_QUEUED;
1421} 1494}
1422 1495
1423static ieee80211_txrx_result 1496static ieee80211_rx_result
1424ieee80211_rx_h_ctrl(struct ieee80211_txrx_data *rx) 1497ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1425{ 1498{
1426 struct ieee80211_local *local = rx->local; 1499 struct ieee80211_local *local = rx->local;
1427 struct ieee80211_hw *hw = &local->hw; 1500 struct ieee80211_hw *hw = &local->hw;
@@ -1432,15 +1505,16 @@ ieee80211_rx_h_ctrl(struct ieee80211_txrx_data *rx)
1432 u16 tid; 1505 u16 tid;
1433 1506
1434 if (likely((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL)) 1507 if (likely((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL))
1435 return TXRX_CONTINUE; 1508 return RX_CONTINUE;
1436 1509
1437 if ((rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BACK_REQ) { 1510 if ((rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BACK_REQ) {
1438 if (!rx->sta) 1511 if (!rx->sta)
1439 return TXRX_CONTINUE; 1512 return RX_CONTINUE;
1440 tid = le16_to_cpu(bar->control) >> 12; 1513 tid = le16_to_cpu(bar->control) >> 12;
1441 tid_agg_rx = &(rx->sta->ampdu_mlme.tid_rx[tid]); 1514 if (rx->sta->ampdu_mlme.tid_state_rx[tid]
1442 if (tid_agg_rx->state != HT_AGG_STATE_OPERATIONAL) 1515 != HT_AGG_STATE_OPERATIONAL)
1443 return TXRX_CONTINUE; 1516 return RX_CONTINUE;
1517 tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
1444 1518
1445 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4; 1519 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
1446 1520
@@ -1457,77 +1531,35 @@ ieee80211_rx_h_ctrl(struct ieee80211_txrx_data *rx)
1457 ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, 1531 ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL,
1458 start_seq_num, 1); 1532 start_seq_num, 1);
1459 rcu_read_unlock(); 1533 rcu_read_unlock();
1460 return TXRX_DROP; 1534 return RX_DROP_UNUSABLE;
1461 } 1535 }
1462 1536
1463 return TXRX_CONTINUE; 1537 return RX_CONTINUE;
1464} 1538}
1465 1539
1466static ieee80211_txrx_result 1540static ieee80211_rx_result
1467ieee80211_rx_h_mgmt(struct ieee80211_txrx_data *rx) 1541ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1468{ 1542{
1469 struct ieee80211_sub_if_data *sdata; 1543 struct ieee80211_sub_if_data *sdata;
1470 1544
1471 if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) 1545 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1472 return TXRX_DROP; 1546 return RX_DROP_MONITOR;
1473 1547
1474 sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); 1548 sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1475 if ((sdata->vif.type == IEEE80211_IF_TYPE_STA || 1549 if ((sdata->vif.type == IEEE80211_IF_TYPE_STA ||
1476 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) && 1550 sdata->vif.type == IEEE80211_IF_TYPE_IBSS ||
1551 sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) &&
1477 !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)) 1552 !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
1478 ieee80211_sta_rx_mgmt(rx->dev, rx->skb, rx->u.rx.status); 1553 ieee80211_sta_rx_mgmt(rx->dev, rx->skb, rx->status);
1479 else 1554 else
1480 return TXRX_DROP; 1555 return RX_DROP_MONITOR;
1481 1556
1482 return TXRX_QUEUED; 1557 return RX_QUEUED;
1483}
1484
1485static inline ieee80211_txrx_result __ieee80211_invoke_rx_handlers(
1486 struct ieee80211_local *local,
1487 ieee80211_rx_handler *handlers,
1488 struct ieee80211_txrx_data *rx,
1489 struct sta_info *sta)
1490{
1491 ieee80211_rx_handler *handler;
1492 ieee80211_txrx_result res = TXRX_DROP;
1493
1494 for (handler = handlers; *handler != NULL; handler++) {
1495 res = (*handler)(rx);
1496
1497 switch (res) {
1498 case TXRX_CONTINUE:
1499 continue;
1500 case TXRX_DROP:
1501 I802_DEBUG_INC(local->rx_handlers_drop);
1502 if (sta)
1503 sta->rx_dropped++;
1504 break;
1505 case TXRX_QUEUED:
1506 I802_DEBUG_INC(local->rx_handlers_queued);
1507 break;
1508 }
1509 break;
1510 }
1511
1512 if (res == TXRX_DROP)
1513 dev_kfree_skb(rx->skb);
1514 return res;
1515}
1516
1517static inline void ieee80211_invoke_rx_handlers(struct ieee80211_local *local,
1518 ieee80211_rx_handler *handlers,
1519 struct ieee80211_txrx_data *rx,
1520 struct sta_info *sta)
1521{
1522 if (__ieee80211_invoke_rx_handlers(local, handlers, rx, sta) ==
1523 TXRX_CONTINUE)
1524 dev_kfree_skb(rx->skb);
1525} 1558}
1526 1559
1527static void ieee80211_rx_michael_mic_report(struct net_device *dev, 1560static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1528 struct ieee80211_hdr *hdr, 1561 struct ieee80211_hdr *hdr,
1529 struct sta_info *sta, 1562 struct ieee80211_rx_data *rx)
1530 struct ieee80211_txrx_data *rx)
1531{ 1563{
1532 int keyidx, hdrlen; 1564 int keyidx, hdrlen;
1533 DECLARE_MAC_BUF(mac); 1565 DECLARE_MAC_BUF(mac);
@@ -1545,7 +1577,7 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1545 dev->name, print_mac(mac, hdr->addr2), 1577 dev->name, print_mac(mac, hdr->addr2),
1546 print_mac(mac2, hdr->addr1), keyidx); 1578 print_mac(mac2, hdr->addr1), keyidx);
1547 1579
1548 if (!sta) { 1580 if (!rx->sta) {
1549 /* 1581 /*
1550 * Some hardware seem to generate incorrect Michael MIC 1582 * Some hardware seem to generate incorrect Michael MIC
1551 * reports; ignore them to avoid triggering countermeasures. 1583 * reports; ignore them to avoid triggering countermeasures.
@@ -1597,7 +1629,89 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1597 rx->skb = NULL; 1629 rx->skb = NULL;
1598} 1630}
1599 1631
1600ieee80211_rx_handler ieee80211_rx_handlers[] = 1632/* TODO: use IEEE80211_RX_FRAGMENTED */
1633static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
1634{
1635 struct ieee80211_sub_if_data *sdata;
1636 struct ieee80211_local *local = rx->local;
1637 struct ieee80211_rtap_hdr {
1638 struct ieee80211_radiotap_header hdr;
1639 u8 flags;
1640 u8 rate;
1641 __le16 chan_freq;
1642 __le16 chan_flags;
1643 } __attribute__ ((packed)) *rthdr;
1644 struct sk_buff *skb = rx->skb, *skb2;
1645 struct net_device *prev_dev = NULL;
1646 struct ieee80211_rx_status *status = rx->status;
1647
1648 if (rx->flags & IEEE80211_RX_CMNTR_REPORTED)
1649 goto out_free_skb;
1650
1651 if (skb_headroom(skb) < sizeof(*rthdr) &&
1652 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
1653 goto out_free_skb;
1654
1655 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
1656 memset(rthdr, 0, sizeof(*rthdr));
1657 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
1658 rthdr->hdr.it_present =
1659 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
1660 (1 << IEEE80211_RADIOTAP_RATE) |
1661 (1 << IEEE80211_RADIOTAP_CHANNEL));
1662
1663 rthdr->rate = rx->rate->bitrate / 5;
1664 rthdr->chan_freq = cpu_to_le16(status->freq);
1665
1666 if (status->band == IEEE80211_BAND_5GHZ)
1667 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
1668 IEEE80211_CHAN_5GHZ);
1669 else
1670 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
1671 IEEE80211_CHAN_2GHZ);
1672
1673 skb_set_mac_header(skb, 0);
1674 skb->ip_summed = CHECKSUM_UNNECESSARY;
1675 skb->pkt_type = PACKET_OTHERHOST;
1676 skb->protocol = htons(ETH_P_802_2);
1677
1678 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1679 if (!netif_running(sdata->dev))
1680 continue;
1681
1682 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR ||
1683 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
1684 continue;
1685
1686 if (prev_dev) {
1687 skb2 = skb_clone(skb, GFP_ATOMIC);
1688 if (skb2) {
1689 skb2->dev = prev_dev;
1690 netif_rx(skb2);
1691 }
1692 }
1693
1694 prev_dev = sdata->dev;
1695 sdata->dev->stats.rx_packets++;
1696 sdata->dev->stats.rx_bytes += skb->len;
1697 }
1698
1699 if (prev_dev) {
1700 skb->dev = prev_dev;
1701 netif_rx(skb);
1702 skb = NULL;
1703 } else
1704 goto out_free_skb;
1705
1706 rx->flags |= IEEE80211_RX_CMNTR_REPORTED;
1707 return;
1708
1709 out_free_skb:
1710 dev_kfree_skb(skb);
1711}
1712
1713typedef ieee80211_rx_result (*ieee80211_rx_handler)(struct ieee80211_rx_data *);
1714static ieee80211_rx_handler ieee80211_rx_handlers[] =
1601{ 1715{
1602 ieee80211_rx_h_if_stats, 1716 ieee80211_rx_h_if_stats,
1603 ieee80211_rx_h_passive_scan, 1717 ieee80211_rx_h_passive_scan,
@@ -1619,10 +1733,51 @@ ieee80211_rx_handler ieee80211_rx_handlers[] =
1619 NULL 1733 NULL
1620}; 1734};
1621 1735
1736static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
1737 struct ieee80211_rx_data *rx,
1738 struct sk_buff *skb)
1739{
1740 ieee80211_rx_handler *handler;
1741 ieee80211_rx_result res = RX_DROP_MONITOR;
1742
1743 rx->skb = skb;
1744 rx->sdata = sdata;
1745 rx->dev = sdata->dev;
1746
1747 for (handler = ieee80211_rx_handlers; *handler != NULL; handler++) {
1748 res = (*handler)(rx);
1749
1750 switch (res) {
1751 case RX_CONTINUE:
1752 continue;
1753 case RX_DROP_UNUSABLE:
1754 case RX_DROP_MONITOR:
1755 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
1756 if (rx->sta)
1757 rx->sta->rx_dropped++;
1758 break;
1759 case RX_QUEUED:
1760 I802_DEBUG_INC(sdata->local->rx_handlers_queued);
1761 break;
1762 }
1763 break;
1764 }
1765
1766 switch (res) {
1767 case RX_CONTINUE:
1768 case RX_DROP_MONITOR:
1769 ieee80211_rx_cooked_monitor(rx);
1770 break;
1771 case RX_DROP_UNUSABLE:
1772 dev_kfree_skb(rx->skb);
1773 break;
1774 }
1775}
1776
1622/* main receive path */ 1777/* main receive path */
1623 1778
1624static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, 1779static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1625 u8 *bssid, struct ieee80211_txrx_data *rx, 1780 u8 *bssid, struct ieee80211_rx_data *rx,
1626 struct ieee80211_hdr *hdr) 1781 struct ieee80211_hdr *hdr)
1627{ 1782{
1628 int multicast = is_multicast_ether_addr(hdr->addr1); 1783 int multicast = is_multicast_ether_addr(hdr->addr1);
@@ -1632,34 +1787,47 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1632 if (!bssid) 1787 if (!bssid)
1633 return 0; 1788 return 0;
1634 if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { 1789 if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
1635 if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) 1790 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
1636 return 0; 1791 return 0;
1637 rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; 1792 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1638 } else if (!multicast && 1793 } else if (!multicast &&
1639 compare_ether_addr(sdata->dev->dev_addr, 1794 compare_ether_addr(sdata->dev->dev_addr,
1640 hdr->addr1) != 0) { 1795 hdr->addr1) != 0) {
1641 if (!(sdata->dev->flags & IFF_PROMISC)) 1796 if (!(sdata->dev->flags & IFF_PROMISC))
1642 return 0; 1797 return 0;
1643 rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; 1798 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1644 } 1799 }
1645 break; 1800 break;
1646 case IEEE80211_IF_TYPE_IBSS: 1801 case IEEE80211_IF_TYPE_IBSS:
1647 if (!bssid) 1802 if (!bssid)
1648 return 0; 1803 return 0;
1649 if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { 1804 if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT &&
1650 if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) 1805 (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON)
1806 return 1;
1807 else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
1808 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
1651 return 0; 1809 return 0;
1652 rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; 1810 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1653 } else if (!multicast && 1811 } else if (!multicast &&
1654 compare_ether_addr(sdata->dev->dev_addr, 1812 compare_ether_addr(sdata->dev->dev_addr,
1655 hdr->addr1) != 0) { 1813 hdr->addr1) != 0) {
1656 if (!(sdata->dev->flags & IFF_PROMISC)) 1814 if (!(sdata->dev->flags & IFF_PROMISC))
1657 return 0; 1815 return 0;
1658 rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; 1816 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1659 } else if (!rx->sta) 1817 } else if (!rx->sta)
1660 rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb, 1818 rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb,
1661 bssid, hdr->addr2); 1819 bssid, hdr->addr2);
1662 break; 1820 break;
1821 case IEEE80211_IF_TYPE_MESH_POINT:
1822 if (!multicast &&
1823 compare_ether_addr(sdata->dev->dev_addr,
1824 hdr->addr1) != 0) {
1825 if (!(sdata->dev->flags & IFF_PROMISC))
1826 return 0;
1827
1828 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1829 }
1830 break;
1663 case IEEE80211_IF_TYPE_VLAN: 1831 case IEEE80211_IF_TYPE_VLAN:
1664 case IEEE80211_IF_TYPE_AP: 1832 case IEEE80211_IF_TYPE_AP:
1665 if (!bssid) { 1833 if (!bssid) {
@@ -1668,12 +1836,12 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1668 return 0; 1836 return 0;
1669 } else if (!ieee80211_bssid_match(bssid, 1837 } else if (!ieee80211_bssid_match(bssid,
1670 sdata->dev->dev_addr)) { 1838 sdata->dev->dev_addr)) {
1671 if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) 1839 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
1672 return 0; 1840 return 0;
1673 rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; 1841 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1674 } 1842 }
1675 if (sdata->dev == sdata->local->mdev && 1843 if (sdata->dev == sdata->local->mdev &&
1676 !(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) 1844 !(rx->flags & IEEE80211_RX_IN_SCAN))
1677 /* do not receive anything via 1845 /* do not receive anything via
1678 * master device when not scanning */ 1846 * master device when not scanning */
1679 return 0; 1847 return 0;
@@ -1704,13 +1872,13 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1704static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 1872static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1705 struct sk_buff *skb, 1873 struct sk_buff *skb,
1706 struct ieee80211_rx_status *status, 1874 struct ieee80211_rx_status *status,
1707 u32 load) 1875 u32 load,
1876 struct ieee80211_rate *rate)
1708{ 1877{
1709 struct ieee80211_local *local = hw_to_local(hw); 1878 struct ieee80211_local *local = hw_to_local(hw);
1710 struct ieee80211_sub_if_data *sdata; 1879 struct ieee80211_sub_if_data *sdata;
1711 struct sta_info *sta;
1712 struct ieee80211_hdr *hdr; 1880 struct ieee80211_hdr *hdr;
1713 struct ieee80211_txrx_data rx; 1881 struct ieee80211_rx_data rx;
1714 u16 type; 1882 u16 type;
1715 int prepares; 1883 int prepares;
1716 struct ieee80211_sub_if_data *prev = NULL; 1884 struct ieee80211_sub_if_data *prev = NULL;
@@ -1722,42 +1890,33 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1722 rx.skb = skb; 1890 rx.skb = skb;
1723 rx.local = local; 1891 rx.local = local;
1724 1892
1725 rx.u.rx.status = status; 1893 rx.status = status;
1726 rx.u.rx.load = load; 1894 rx.load = load;
1895 rx.rate = rate;
1727 rx.fc = le16_to_cpu(hdr->frame_control); 1896 rx.fc = le16_to_cpu(hdr->frame_control);
1728 type = rx.fc & IEEE80211_FCTL_FTYPE; 1897 type = rx.fc & IEEE80211_FCTL_FTYPE;
1729 1898
1730 if (type == IEEE80211_FTYPE_DATA || type == IEEE80211_FTYPE_MGMT) 1899 if (type == IEEE80211_FTYPE_DATA || type == IEEE80211_FTYPE_MGMT)
1731 local->dot11ReceivedFragmentCount++; 1900 local->dot11ReceivedFragmentCount++;
1732 1901
1733 sta = rx.sta = sta_info_get(local, hdr->addr2); 1902 rx.sta = sta_info_get(local, hdr->addr2);
1734 if (sta) { 1903 if (rx.sta) {
1735 rx.dev = rx.sta->dev; 1904 rx.sdata = rx.sta->sdata;
1736 rx.sdata = IEEE80211_DEV_TO_SUB_IF(rx.dev); 1905 rx.dev = rx.sta->sdata->dev;
1737 } 1906 }
1738 1907
1739 if ((status->flag & RX_FLAG_MMIC_ERROR)) { 1908 if ((status->flag & RX_FLAG_MMIC_ERROR)) {
1740 ieee80211_rx_michael_mic_report(local->mdev, hdr, sta, &rx); 1909 ieee80211_rx_michael_mic_report(local->mdev, hdr, &rx);
1741 goto end; 1910 return;
1742 } 1911 }
1743 1912
1744 if (unlikely(local->sta_sw_scanning || local->sta_hw_scanning)) 1913 if (unlikely(local->sta_sw_scanning || local->sta_hw_scanning))
1745 rx.flags |= IEEE80211_TXRXD_RXIN_SCAN; 1914 rx.flags |= IEEE80211_RX_IN_SCAN;
1746 1915
1747 if (__ieee80211_invoke_rx_handlers(local, local->rx_pre_handlers, &rx, 1916 ieee80211_parse_qos(&rx);
1748 sta) != TXRX_CONTINUE) 1917 ieee80211_verify_ip_alignment(&rx);
1749 goto end;
1750 skb = rx.skb;
1751 1918
1752 if (sta && !(sta->flags & (WLAN_STA_WDS | WLAN_STA_ASSOC_AP)) && 1919 skb = rx.skb;
1753 !atomic_read(&local->iff_promiscs) &&
1754 !is_multicast_ether_addr(hdr->addr1)) {
1755 rx.flags |= IEEE80211_TXRXD_RXRA_MATCH;
1756 ieee80211_invoke_rx_handlers(local, local->rx_handlers, &rx,
1757 rx.sta);
1758 sta_info_put(sta);
1759 return;
1760 }
1761 1920
1762 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 1921 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1763 if (!netif_running(sdata->dev)) 1922 if (!netif_running(sdata->dev))
@@ -1767,10 +1926,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1767 continue; 1926 continue;
1768 1927
1769 bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 1928 bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
1770 rx.flags |= IEEE80211_TXRXD_RXRA_MATCH; 1929 rx.flags |= IEEE80211_RX_RA_MATCH;
1771 prepares = prepare_for_handlers(sdata, bssid, &rx, hdr); 1930 prepares = prepare_for_handlers(sdata, bssid, &rx, hdr);
1772 /* prepare_for_handlers can change sta */
1773 sta = rx.sta;
1774 1931
1775 if (!prepares) 1932 if (!prepares)
1776 continue; 1933 continue;
@@ -1801,26 +1958,14 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1801 continue; 1958 continue;
1802 } 1959 }
1803 rx.fc = le16_to_cpu(hdr->frame_control); 1960 rx.fc = le16_to_cpu(hdr->frame_control);
1804 rx.skb = skb_new; 1961 ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
1805 rx.dev = prev->dev;
1806 rx.sdata = prev;
1807 ieee80211_invoke_rx_handlers(local, local->rx_handlers,
1808 &rx, sta);
1809 prev = sdata; 1962 prev = sdata;
1810 } 1963 }
1811 if (prev) { 1964 if (prev) {
1812 rx.fc = le16_to_cpu(hdr->frame_control); 1965 rx.fc = le16_to_cpu(hdr->frame_control);
1813 rx.skb = skb; 1966 ieee80211_invoke_rx_handlers(prev, &rx, skb);
1814 rx.dev = prev->dev;
1815 rx.sdata = prev;
1816 ieee80211_invoke_rx_handlers(local, local->rx_handlers,
1817 &rx, sta);
1818 } else 1967 } else
1819 dev_kfree_skb(skb); 1968 dev_kfree_skb(skb);
1820
1821 end:
1822 if (sta)
1823 sta_info_put(sta);
1824} 1969}
1825 1970
1826#define SEQ_MODULO 0x1000 1971#define SEQ_MODULO 0x1000
@@ -1856,6 +2001,8 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
1856 u16 head_seq_num, buf_size; 2001 u16 head_seq_num, buf_size;
1857 int index; 2002 int index;
1858 u32 pkt_load; 2003 u32 pkt_load;
2004 struct ieee80211_supported_band *sband;
2005 struct ieee80211_rate *rate;
1859 2006
1860 buf_size = tid_agg_rx->buf_size; 2007 buf_size = tid_agg_rx->buf_size;
1861 head_seq_num = tid_agg_rx->head_seq_num; 2008 head_seq_num = tid_agg_rx->head_seq_num;
@@ -1886,12 +2033,14 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
1886 memcpy(&status, 2033 memcpy(&status,
1887 tid_agg_rx->reorder_buf[index]->cb, 2034 tid_agg_rx->reorder_buf[index]->cb,
1888 sizeof(status)); 2035 sizeof(status));
2036 sband = local->hw.wiphy->bands[status.band];
2037 rate = &sband->bitrates[status.rate_idx];
1889 pkt_load = ieee80211_rx_load_stats(local, 2038 pkt_load = ieee80211_rx_load_stats(local,
1890 tid_agg_rx->reorder_buf[index], 2039 tid_agg_rx->reorder_buf[index],
1891 &status); 2040 &status, rate);
1892 __ieee80211_rx_handle_packet(hw, 2041 __ieee80211_rx_handle_packet(hw,
1893 tid_agg_rx->reorder_buf[index], 2042 tid_agg_rx->reorder_buf[index],
1894 &status, pkt_load); 2043 &status, pkt_load, rate);
1895 tid_agg_rx->stored_mpdu_num--; 2044 tid_agg_rx->stored_mpdu_num--;
1896 tid_agg_rx->reorder_buf[index] = NULL; 2045 tid_agg_rx->reorder_buf[index] = NULL;
1897 } 2046 }
@@ -1931,11 +2080,13 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
1931 /* release the reordered frame back to stack */ 2080 /* release the reordered frame back to stack */
1932 memcpy(&status, tid_agg_rx->reorder_buf[index]->cb, 2081 memcpy(&status, tid_agg_rx->reorder_buf[index]->cb,
1933 sizeof(status)); 2082 sizeof(status));
2083 sband = local->hw.wiphy->bands[status.band];
2084 rate = &sband->bitrates[status.rate_idx];
1934 pkt_load = ieee80211_rx_load_stats(local, 2085 pkt_load = ieee80211_rx_load_stats(local,
1935 tid_agg_rx->reorder_buf[index], 2086 tid_agg_rx->reorder_buf[index],
1936 &status); 2087 &status, rate);
1937 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index], 2088 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index],
1938 &status, pkt_load); 2089 &status, pkt_load, rate);
1939 tid_agg_rx->stored_mpdu_num--; 2090 tid_agg_rx->stored_mpdu_num--;
1940 tid_agg_rx->reorder_buf[index] = NULL; 2091 tid_agg_rx->reorder_buf[index] = NULL;
1941 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 2092 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
@@ -1970,11 +2121,12 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
1970 2121
1971 qc = skb->data + ieee80211_get_hdrlen(fc) - QOS_CONTROL_LEN; 2122 qc = skb->data + ieee80211_get_hdrlen(fc) - QOS_CONTROL_LEN;
1972 tid = qc[0] & QOS_CONTROL_TID_MASK; 2123 tid = qc[0] & QOS_CONTROL_TID_MASK;
1973 tid_agg_rx = &(sta->ampdu_mlme.tid_rx[tid]);
1974 2124
1975 if (tid_agg_rx->state != HT_AGG_STATE_OPERATIONAL) 2125 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
1976 goto end_reorder; 2126 goto end_reorder;
1977 2127
2128 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
2129
1978 /* null data frames are excluded */ 2130 /* null data frames are excluded */
1979 if (unlikely(fc & IEEE80211_STYPE_NULLFUNC)) 2131 if (unlikely(fc & IEEE80211_STYPE_NULLFUNC))
1980 goto end_reorder; 2132 goto end_reorder;
@@ -1991,7 +2143,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
1991 /* if this mpdu is fragmented - terminate rx aggregation session */ 2143 /* if this mpdu is fragmented - terminate rx aggregation session */
1992 sc = le16_to_cpu(hdr->seq_ctrl); 2144 sc = le16_to_cpu(hdr->seq_ctrl);
1993 if (sc & IEEE80211_SCTL_FRAG) { 2145 if (sc & IEEE80211_SCTL_FRAG) {
1994 ieee80211_sta_stop_rx_ba_session(sta->dev, sta->addr, 2146 ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr,
1995 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP); 2147 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
1996 ret = 1; 2148 ret = 1;
1997 goto end_reorder; 2149 goto end_reorder;
@@ -2001,9 +2153,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2001 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; 2153 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
2002 ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, 2154 ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb,
2003 mpdu_seq_num, 0); 2155 mpdu_seq_num, 0);
2004end_reorder: 2156 end_reorder:
2005 if (sta)
2006 sta_info_put(sta);
2007 return ret; 2157 return ret;
2008} 2158}
2009 2159
@@ -2016,6 +2166,25 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2016{ 2166{
2017 struct ieee80211_local *local = hw_to_local(hw); 2167 struct ieee80211_local *local = hw_to_local(hw);
2018 u32 pkt_load; 2168 u32 pkt_load;
2169 struct ieee80211_rate *rate = NULL;
2170 struct ieee80211_supported_band *sband;
2171
2172 if (status->band < 0 ||
2173 status->band > IEEE80211_NUM_BANDS) {
2174 WARN_ON(1);
2175 return;
2176 }
2177
2178 sband = local->hw.wiphy->bands[status->band];
2179
2180 if (!sband ||
2181 status->rate_idx < 0 ||
2182 status->rate_idx >= sband->n_bitrates) {
2183 WARN_ON(1);
2184 return;
2185 }
2186
2187 rate = &sband->bitrates[status->rate_idx];
2019 2188
2020 /* 2189 /*
2021 * key references and virtual interfaces are protected using RCU 2190 * key references and virtual interfaces are protected using RCU
@@ -2030,17 +2199,17 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2030 * if it was previously present. 2199 * if it was previously present.
2031 * Also, frames with less than 16 bytes are dropped. 2200 * Also, frames with less than 16 bytes are dropped.
2032 */ 2201 */
2033 skb = ieee80211_rx_monitor(local, skb, status); 2202 skb = ieee80211_rx_monitor(local, skb, status, rate);
2034 if (!skb) { 2203 if (!skb) {
2035 rcu_read_unlock(); 2204 rcu_read_unlock();
2036 return; 2205 return;
2037 } 2206 }
2038 2207
2039 pkt_load = ieee80211_rx_load_stats(local, skb, status); 2208 pkt_load = ieee80211_rx_load_stats(local, skb, status, rate);
2040 local->channel_use_raw += pkt_load; 2209 local->channel_use_raw += pkt_load;
2041 2210
2042 if (!ieee80211_rx_reorder_ampdu(local, skb)) 2211 if (!ieee80211_rx_reorder_ampdu(local, skb))
2043 __ieee80211_rx_handle_packet(hw, skb, status, pkt_load); 2212 __ieee80211_rx_handle_packet(hw, skb, status, pkt_load, rate);
2044 2213
2045 rcu_read_unlock(); 2214 rcu_read_unlock();
2046} 2215}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 1f74bd296357..7d4fe4a52929 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -15,21 +15,57 @@
15#include <linux/skbuff.h> 15#include <linux/skbuff.h>
16#include <linux/if_arp.h> 16#include <linux/if_arp.h>
17#include <linux/timer.h> 17#include <linux/timer.h>
18#include <linux/rtnetlink.h>
18 19
19#include <net/mac80211.h> 20#include <net/mac80211.h>
20#include "ieee80211_i.h" 21#include "ieee80211_i.h"
21#include "ieee80211_rate.h" 22#include "rate.h"
22#include "sta_info.h" 23#include "sta_info.h"
23#include "debugfs_sta.h" 24#include "debugfs_sta.h"
25#include "mesh.h"
24 26
25/* Caller must hold local->sta_lock */ 27/**
26static void sta_info_hash_add(struct ieee80211_local *local, 28 * DOC: STA information lifetime rules
27 struct sta_info *sta) 29 *
28{ 30 * STA info structures (&struct sta_info) are managed in a hash table
29 sta->hnext = local->sta_hash[STA_HASH(sta->addr)]; 31 * for faster lookup and a list for iteration. They are managed using
30 local->sta_hash[STA_HASH(sta->addr)] = sta; 32 * RCU, i.e. access to the list and hash table is protected by RCU.
31} 33 *
32 34 * Upon allocating a STA info structure with sta_info_alloc(), the caller owns
35 * that structure. It must then either destroy it using sta_info_destroy()
36 * (which is pretty useless) or insert it into the hash table using
37 * sta_info_insert() which demotes the reference from ownership to a regular
38 * RCU-protected reference; if the function is called without protection by an
39 * RCU critical section the reference is instantly invalidated. Note that the
40 * caller may not do much with the STA info before inserting it, in particular,
41 * it may not start any mesh peer link management or add encryption keys.
42 *
43 * When the insertion fails (sta_info_insert()) returns non-zero), the
44 * structure will have been freed by sta_info_insert()!
45 *
46 * Because there are debugfs entries for each station, and adding those
47 * must be able to sleep, it is also possible to "pin" a station entry,
48 * that means it can be removed from the hash table but not be freed.
49 * See the comment in __sta_info_unlink() for more information, this is
50 * an internal capability only.
51 *
52 * In order to remove a STA info structure, the caller needs to first
53 * unlink it (sta_info_unlink()) from the list and hash tables and
54 * then destroy it; sta_info_destroy() will wait for an RCU grace period
55 * to elapse before actually freeing it. Due to the pinning and the
56 * possibility of multiple callers trying to remove the same STA info at
57 * the same time, sta_info_unlink() can clear the STA info pointer it is
58 * passed to indicate that the STA info is owned by somebody else now.
59 *
60 * If sta_info_unlink() did not clear the pointer then the caller owns
61 * the STA info structure now and is responsible of destroying it with
62 * a call to sta_info_destroy().
63 *
64 * In all other cases, there is no concept of ownership on a STA entry,
65 * each structure is owned by the global hash table/list until it is
66 * removed. All users of the structure need to be RCU protected so that
67 * the structure won't be freed before they are done using it.
68 */
33 69
34/* Caller must hold local->sta_lock */ 70/* Caller must hold local->sta_lock */
35static int sta_info_hash_del(struct ieee80211_local *local, 71static int sta_info_hash_del(struct ieee80211_local *local,
@@ -41,237 +77,439 @@ static int sta_info_hash_del(struct ieee80211_local *local,
41 if (!s) 77 if (!s)
42 return -ENOENT; 78 return -ENOENT;
43 if (s == sta) { 79 if (s == sta) {
44 local->sta_hash[STA_HASH(sta->addr)] = s->hnext; 80 rcu_assign_pointer(local->sta_hash[STA_HASH(sta->addr)],
81 s->hnext);
45 return 0; 82 return 0;
46 } 83 }
47 84
48 while (s->hnext && s->hnext != sta) 85 while (s->hnext && s->hnext != sta)
49 s = s->hnext; 86 s = s->hnext;
50 if (s->hnext) { 87 if (s->hnext) {
51 s->hnext = sta->hnext; 88 rcu_assign_pointer(s->hnext, sta->hnext);
52 return 0; 89 return 0;
53 } 90 }
54 91
55 return -ENOENT; 92 return -ENOENT;
56} 93}
57 94
58struct sta_info *sta_info_get(struct ieee80211_local *local, u8 *addr) 95/* protected by RCU */
96static struct sta_info *__sta_info_find(struct ieee80211_local *local,
97 u8 *addr)
59{ 98{
60 struct sta_info *sta; 99 struct sta_info *sta;
61 100
62 read_lock_bh(&local->sta_lock); 101 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
63 sta = local->sta_hash[STA_HASH(addr)];
64 while (sta) { 102 while (sta) {
65 if (memcmp(sta->addr, addr, ETH_ALEN) == 0) { 103 if (compare_ether_addr(sta->addr, addr) == 0)
66 __sta_info_get(sta);
67 break; 104 break;
68 } 105 sta = rcu_dereference(sta->hnext);
69 sta = sta->hnext;
70 } 106 }
71 read_unlock_bh(&local->sta_lock);
72
73 return sta; 107 return sta;
74} 108}
109
110struct sta_info *sta_info_get(struct ieee80211_local *local, u8 *addr)
111{
112 return __sta_info_find(local, addr);
113}
75EXPORT_SYMBOL(sta_info_get); 114EXPORT_SYMBOL(sta_info_get);
76 115
77int sta_info_min_txrate_get(struct ieee80211_local *local) 116struct sta_info *sta_info_get_by_idx(struct ieee80211_local *local, int idx,
117 struct net_device *dev)
78{ 118{
79 struct sta_info *sta; 119 struct sta_info *sta;
80 struct ieee80211_hw_mode *mode; 120 int i = 0;
81 int min_txrate = 9999999; 121
82 int i; 122 list_for_each_entry_rcu(sta, &local->sta_list, list) {
83 123 if (dev && dev != sta->sdata->dev)
84 read_lock_bh(&local->sta_lock); 124 continue;
85 mode = local->oper_hw_mode; 125 if (i < idx) {
86 for (i = 0; i < STA_HASH_SIZE; i++) { 126 ++i;
87 sta = local->sta_hash[i]; 127 continue;
88 while (sta) {
89 if (sta->txrate < min_txrate)
90 min_txrate = sta->txrate;
91 sta = sta->hnext;
92 } 128 }
129 return sta;
93 } 130 }
94 read_unlock_bh(&local->sta_lock);
95 if (min_txrate == 9999999)
96 min_txrate = 0;
97 131
98 return mode->rates[min_txrate].rate; 132 return NULL;
99} 133}
100 134
135/**
136 * __sta_info_free - internal STA free helper
137 *
138 * @sta: STA info to free
139 *
140 * This function must undo everything done by sta_info_alloc()
141 * that may happen before sta_info_insert().
142 */
143static void __sta_info_free(struct ieee80211_local *local,
144 struct sta_info *sta)
145{
146 DECLARE_MAC_BUF(mbuf);
147
148 rate_control_free_sta(sta->rate_ctrl, sta->rate_ctrl_priv);
149 rate_control_put(sta->rate_ctrl);
150
151#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
152 printk(KERN_DEBUG "%s: Destroyed STA %s\n",
153 wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->addr));
154#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
101 155
102static void sta_info_release(struct kref *kref) 156 kfree(sta);
157}
158
159void sta_info_destroy(struct sta_info *sta)
103{ 160{
104 struct sta_info *sta = container_of(kref, struct sta_info, kref); 161 struct ieee80211_local *local;
105 struct ieee80211_local *local = sta->local;
106 struct sk_buff *skb; 162 struct sk_buff *skb;
107 int i; 163 int i;
108 164
109 /* free sta structure; it has already been removed from 165 might_sleep();
110 * hash table etc. external structures. Make sure that all 166
111 * buffered frames are release (one might have been added 167 if (!sta)
112 * after sta_info_free() was called). */ 168 return;
169
170 local = sta->local;
171
172 rate_control_remove_sta_debugfs(sta);
173 ieee80211_sta_debugfs_remove(sta);
174
175#ifdef CONFIG_MAC80211_MESH
176 if (ieee80211_vif_is_mesh(&sta->sdata->vif))
177 mesh_plink_deactivate(sta);
178#endif
179
180 /*
181 * We have only unlinked the key, and actually destroying it
182 * may mean it is removed from hardware which requires that
183 * the key->sta pointer is still valid, so flush the key todo
184 * list here.
185 *
186 * ieee80211_key_todo() will synchronize_rcu() so after this
187 * nothing can reference this sta struct any more.
188 */
189 ieee80211_key_todo();
190
191#ifdef CONFIG_MAC80211_MESH
192 if (ieee80211_vif_is_mesh(&sta->sdata->vif))
193 del_timer_sync(&sta->plink_timer);
194#endif
195
113 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) { 196 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
114 local->total_ps_buffered--; 197 local->total_ps_buffered--;
115 dev_kfree_skb_any(skb); 198 dev_kfree_skb_any(skb);
116 } 199 }
117 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { 200
201 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
118 dev_kfree_skb_any(skb); 202 dev_kfree_skb_any(skb);
203
204 for (i = 0; i < STA_TID_NUM; i++) {
205 spin_lock_bh(&sta->ampdu_mlme.ampdu_rx);
206 if (sta->ampdu_mlme.tid_rx[i])
207 del_timer_sync(&sta->ampdu_mlme.tid_rx[i]->session_timer);
208 spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx);
209 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
210 if (sta->ampdu_mlme.tid_tx[i])
211 del_timer_sync(&sta->ampdu_mlme.tid_tx[i]->addba_resp_timer);
212 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
119 } 213 }
120 for (i = 0; i < STA_TID_NUM; i++) 214
121 del_timer_sync(&sta->ampdu_mlme.tid_rx[i].session_timer); 215 __sta_info_free(local, sta);
122 rate_control_free_sta(sta->rate_ctrl, sta->rate_ctrl_priv);
123 rate_control_put(sta->rate_ctrl);
124 kfree(sta);
125} 216}
126 217
127 218
128void sta_info_put(struct sta_info *sta) 219/* Caller must hold local->sta_lock */
220static void sta_info_hash_add(struct ieee80211_local *local,
221 struct sta_info *sta)
129{ 222{
130 kref_put(&sta->kref, sta_info_release); 223 sta->hnext = local->sta_hash[STA_HASH(sta->addr)];
224 rcu_assign_pointer(local->sta_hash[STA_HASH(sta->addr)], sta);
131} 225}
132EXPORT_SYMBOL(sta_info_put);
133 226
134 227struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
135struct sta_info * sta_info_add(struct ieee80211_local *local, 228 u8 *addr, gfp_t gfp)
136 struct net_device *dev, u8 *addr, gfp_t gfp)
137{ 229{
230 struct ieee80211_local *local = sdata->local;
138 struct sta_info *sta; 231 struct sta_info *sta;
139 int i; 232 int i;
140 DECLARE_MAC_BUF(mac); 233 DECLARE_MAC_BUF(mbuf);
141 234
142 sta = kzalloc(sizeof(*sta), gfp); 235 sta = kzalloc(sizeof(*sta), gfp);
143 if (!sta) 236 if (!sta)
144 return NULL; 237 return NULL;
145 238
146 kref_init(&sta->kref); 239 memcpy(sta->addr, addr, ETH_ALEN);
240 sta->local = local;
241 sta->sdata = sdata;
147 242
148 sta->rate_ctrl = rate_control_get(local->rate_ctrl); 243 sta->rate_ctrl = rate_control_get(local->rate_ctrl);
149 sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl, gfp); 244 sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
245 gfp);
150 if (!sta->rate_ctrl_priv) { 246 if (!sta->rate_ctrl_priv) {
151 rate_control_put(sta->rate_ctrl); 247 rate_control_put(sta->rate_ctrl);
152 kfree(sta); 248 kfree(sta);
153 return NULL; 249 return NULL;
154 } 250 }
155 251
156 memcpy(sta->addr, addr, ETH_ALEN);
157 sta->local = local;
158 sta->dev = dev;
159 spin_lock_init(&sta->ampdu_mlme.ampdu_rx); 252 spin_lock_init(&sta->ampdu_mlme.ampdu_rx);
253 spin_lock_init(&sta->ampdu_mlme.ampdu_tx);
160 for (i = 0; i < STA_TID_NUM; i++) { 254 for (i = 0; i < STA_TID_NUM; i++) {
161 /* timer_to_tid must be initialized with identity mapping to 255 /* timer_to_tid must be initialized with identity mapping to
162 * enable session_timer's data differentiation. refer to 256 * enable session_timer's data differentiation. refer to
163 * sta_rx_agg_session_timer_expired for useage */ 257 * sta_rx_agg_session_timer_expired for useage */
164 sta->timer_to_tid[i] = i; 258 sta->timer_to_tid[i] = i;
165 /* rx timers */ 259 /* tid to tx queue: initialize according to HW (0 is valid) */
166 sta->ampdu_mlme.tid_rx[i].session_timer.function = 260 sta->tid_to_tx_q[i] = local->hw.queues;
167 sta_rx_agg_session_timer_expired; 261 /* rx */
168 sta->ampdu_mlme.tid_rx[i].session_timer.data = 262 sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE;
169 (unsigned long)&sta->timer_to_tid[i]; 263 sta->ampdu_mlme.tid_rx[i] = NULL;
170 init_timer(&sta->ampdu_mlme.tid_rx[i].session_timer); 264 /* tx */
265 sta->ampdu_mlme.tid_state_tx[i] = HT_AGG_STATE_IDLE;
266 sta->ampdu_mlme.tid_tx[i] = NULL;
267 sta->ampdu_mlme.addba_req_num[i] = 0;
171 } 268 }
172 skb_queue_head_init(&sta->ps_tx_buf); 269 skb_queue_head_init(&sta->ps_tx_buf);
173 skb_queue_head_init(&sta->tx_filtered); 270 skb_queue_head_init(&sta->tx_filtered);
174 __sta_info_get(sta); /* sta used by caller, decremented by 271
175 * sta_info_put() */ 272#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
176 write_lock_bh(&local->sta_lock); 273 printk(KERN_DEBUG "%s: Allocated STA %s\n",
274 wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->addr));
275#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
276
277#ifdef CONFIG_MAC80211_MESH
278 sta->plink_state = PLINK_LISTEN;
279 spin_lock_init(&sta->plink_lock);
280 init_timer(&sta->plink_timer);
281#endif
282
283 return sta;
284}
285
286int sta_info_insert(struct sta_info *sta)
287{
288 struct ieee80211_local *local = sta->local;
289 struct ieee80211_sub_if_data *sdata = sta->sdata;
290 unsigned long flags;
291 int err = 0;
292 DECLARE_MAC_BUF(mac);
293
294 /*
295 * Can't be a WARN_ON because it can be triggered through a race:
296 * something inserts a STA (on one CPU) without holding the RTNL
297 * and another CPU turns off the net device.
298 */
299 if (unlikely(!netif_running(sdata->dev))) {
300 err = -ENETDOWN;
301 goto out_free;
302 }
303
304 if (WARN_ON(compare_ether_addr(sta->addr, sdata->dev->dev_addr) == 0 ||
305 is_multicast_ether_addr(sta->addr))) {
306 err = -EINVAL;
307 goto out_free;
308 }
309
310 spin_lock_irqsave(&local->sta_lock, flags);
311 /* check if STA exists already */
312 if (__sta_info_find(local, sta->addr)) {
313 spin_unlock_irqrestore(&local->sta_lock, flags);
314 err = -EEXIST;
315 goto out_free;
316 }
177 list_add(&sta->list, &local->sta_list); 317 list_add(&sta->list, &local->sta_list);
178 local->num_sta++; 318 local->num_sta++;
179 sta_info_hash_add(local, sta); 319 sta_info_hash_add(local, sta);
180 if (local->ops->sta_notify) {
181 struct ieee80211_sub_if_data *sdata;
182 320
183 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 321 /* notify driver */
322 if (local->ops->sta_notify) {
184 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) 323 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN)
185 sdata = sdata->u.vlan.ap; 324 sdata = sdata->u.vlan.ap;
186 325
187 local->ops->sta_notify(local_to_hw(local), &sdata->vif, 326 local->ops->sta_notify(local_to_hw(local), &sdata->vif,
188 STA_NOTIFY_ADD, addr); 327 STA_NOTIFY_ADD, sta->addr);
189 } 328 }
190 write_unlock_bh(&local->sta_lock);
191 329
192#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 330#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
193 printk(KERN_DEBUG "%s: Added STA %s\n", 331 printk(KERN_DEBUG "%s: Inserted STA %s\n",
194 wiphy_name(local->hw.wiphy), print_mac(mac, addr)); 332 wiphy_name(local->hw.wiphy), print_mac(mac, sta->addr));
195#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 333#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
196 334
335 spin_unlock_irqrestore(&local->sta_lock, flags);
336
197#ifdef CONFIG_MAC80211_DEBUGFS 337#ifdef CONFIG_MAC80211_DEBUGFS
198 /* debugfs entry adding might sleep, so schedule process 338 /*
339 * Debugfs entry adding might sleep, so schedule process
199 * context task for adding entry for STAs that do not yet 340 * context task for adding entry for STAs that do not yet
200 * have one. */ 341 * have one.
201 queue_work(local->hw.workqueue, &local->sta_debugfs_add); 342 * NOTE: due to auto-freeing semantics this may only be done
343 * if the insertion is successful!
344 */
345 schedule_work(&local->sta_debugfs_add);
202#endif 346#endif
203 347
204 return sta; 348 if (ieee80211_vif_is_mesh(&sdata->vif))
349 mesh_accept_plinks_update(sdata);
350
351 return 0;
352 out_free:
353 BUG_ON(!err);
354 __sta_info_free(local, sta);
355 return err;
205} 356}
206 357
207/* Caller must hold local->sta_lock */ 358static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid)
208void sta_info_remove(struct sta_info *sta)
209{ 359{
210 struct ieee80211_local *local = sta->local; 360 /*
211 struct ieee80211_sub_if_data *sdata; 361 * This format has been mandated by the IEEE specifications,
362 * so this line may not be changed to use the __set_bit() format.
363 */
364 bss->tim[aid / 8] |= (1 << (aid % 8));
365}
212 366
213 /* don't do anything if we've been removed already */ 367static inline void __bss_tim_clear(struct ieee80211_if_ap *bss, u16 aid)
214 if (sta_info_hash_del(local, sta)) 368{
215 return; 369 /*
370 * This format has been mandated by the IEEE specifications,
371 * so this line may not be changed to use the __clear_bit() format.
372 */
373 bss->tim[aid / 8] &= ~(1 << (aid % 8));
374}
216 375
217 list_del(&sta->list); 376static void __sta_info_set_tim_bit(struct ieee80211_if_ap *bss,
218 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); 377 struct sta_info *sta)
219 if (sta->flags & WLAN_STA_PS) { 378{
220 sta->flags &= ~WLAN_STA_PS; 379 if (bss)
221 if (sdata->bss) 380 __bss_tim_set(bss, sta->aid);
222 atomic_dec(&sdata->bss->num_sta_ps); 381 if (sta->local->ops->set_tim) {
382 sta->local->tim_in_locked_section = true;
383 sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 1);
384 sta->local->tim_in_locked_section = false;
223 } 385 }
224 local->num_sta--; 386}
225 sta_info_remove_aid_ptr(sta); 387
388void sta_info_set_tim_bit(struct sta_info *sta)
389{
390 unsigned long flags;
226 391
392 spin_lock_irqsave(&sta->local->sta_lock, flags);
393 __sta_info_set_tim_bit(sta->sdata->bss, sta);
394 spin_unlock_irqrestore(&sta->local->sta_lock, flags);
227} 395}
228 396
229void sta_info_free(struct sta_info *sta) 397static void __sta_info_clear_tim_bit(struct ieee80211_if_ap *bss,
398 struct sta_info *sta)
230{ 399{
231 struct sk_buff *skb; 400 if (bss)
232 struct ieee80211_local *local = sta->local; 401 __bss_tim_clear(bss, sta->aid);
233 DECLARE_MAC_BUF(mac); 402 if (sta->local->ops->set_tim) {
403 sta->local->tim_in_locked_section = true;
404 sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 0);
405 sta->local->tim_in_locked_section = false;
406 }
407}
234 408
235 might_sleep(); 409void sta_info_clear_tim_bit(struct sta_info *sta)
410{
411 unsigned long flags;
236 412
237 write_lock_bh(&local->sta_lock); 413 spin_lock_irqsave(&sta->local->sta_lock, flags);
238 sta_info_remove(sta); 414 __sta_info_clear_tim_bit(sta->sdata->bss, sta);
239 write_unlock_bh(&local->sta_lock); 415 spin_unlock_irqrestore(&sta->local->sta_lock, flags);
416}
240 417
241 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) { 418void __sta_info_unlink(struct sta_info **sta)
242 local->total_ps_buffered--; 419{
243 dev_kfree_skb(skb); 420 struct ieee80211_local *local = (*sta)->local;
244 } 421 struct ieee80211_sub_if_data *sdata = (*sta)->sdata;
245 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { 422#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
246 dev_kfree_skb(skb); 423 DECLARE_MAC_BUF(mbuf);
424#endif
425 /*
426 * pull caller's reference if we're already gone.
427 */
428 if (sta_info_hash_del(local, *sta)) {
429 *sta = NULL;
430 return;
247 } 431 }
248 432
249#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 433 if ((*sta)->key) {
250 printk(KERN_DEBUG "%s: Removed STA %s\n", 434 ieee80211_key_free((*sta)->key);
251 wiphy_name(local->hw.wiphy), print_mac(mac, sta->addr)); 435 WARN_ON((*sta)->key);
252#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 436 }
253 437
254 ieee80211_key_free(sta->key); 438 list_del(&(*sta)->list);
255 sta->key = NULL;
256 439
257 if (local->ops->sta_notify) { 440 if ((*sta)->flags & WLAN_STA_PS) {
258 struct ieee80211_sub_if_data *sdata; 441 (*sta)->flags &= ~WLAN_STA_PS;
442 if (sdata->bss)
443 atomic_dec(&sdata->bss->num_sta_ps);
444 __sta_info_clear_tim_bit(sdata->bss, *sta);
445 }
259 446
260 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); 447 local->num_sta--;
261 448
449 if (local->ops->sta_notify) {
262 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) 450 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN)
263 sdata = sdata->u.vlan.ap; 451 sdata = sdata->u.vlan.ap;
264 452
265 local->ops->sta_notify(local_to_hw(local), &sdata->vif, 453 local->ops->sta_notify(local_to_hw(local), &sdata->vif,
266 STA_NOTIFY_REMOVE, sta->addr); 454 STA_NOTIFY_REMOVE, (*sta)->addr);
267 } 455 }
268 456
269 rate_control_remove_sta_debugfs(sta); 457 if (ieee80211_vif_is_mesh(&sdata->vif)) {
270 ieee80211_sta_debugfs_remove(sta); 458 mesh_accept_plinks_update(sdata);
459#ifdef CONFIG_MAC80211_MESH
460 del_timer(&(*sta)->plink_timer);
461#endif
462 }
271 463
272 sta_info_put(sta); 464#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
465 printk(KERN_DEBUG "%s: Removed STA %s\n",
466 wiphy_name(local->hw.wiphy), print_mac(mbuf, (*sta)->addr));
467#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
468
469 /*
470 * Finally, pull caller's reference if the STA is pinned by the
471 * task that is adding the debugfs entries. In that case, we
472 * leave the STA "to be freed".
473 *
474 * The rules are not trivial, but not too complex either:
475 * (1) pin_status is only modified under the sta_lock
476 * (2) STAs may only be pinned under the RTNL so that
477 * sta_info_flush() is guaranteed to actually destroy
478 * all STAs that are active for a given interface, this
479 * is required for correctness because otherwise we
480 * could notify a driver that an interface is going
481 * away and only after that (!) notify it about a STA
482 * on that interface going away.
483 * (3) sta_info_debugfs_add_work() will set the status
484 * to PINNED when it found an item that needs a new
485 * debugfs directory created. In that case, that item
486 * must not be freed although all *RCU* users are done
487 * with it. Hence, we tell the caller of _unlink()
488 * that the item is already gone (as can happen when
489 * two tasks try to unlink/destroy at the same time)
490 * (4) We set the pin_status to DESTROY here when we
491 * find such an item.
492 * (5) sta_info_debugfs_add_work() will reset the pin_status
493 * from PINNED to NORMAL when it is done with the item,
494 * but will check for DESTROY before resetting it in
495 * which case it will free the item.
496 */
497 if ((*sta)->pin_status == STA_INFO_PIN_STAT_PINNED) {
498 (*sta)->pin_status = STA_INFO_PIN_STAT_DESTROY;
499 *sta = NULL;
500 return;
501 }
273} 502}
274 503
504void sta_info_unlink(struct sta_info **sta)
505{
506 struct ieee80211_local *local = (*sta)->local;
507 unsigned long flags;
508
509 spin_lock_irqsave(&local->sta_lock, flags);
510 __sta_info_unlink(sta);
511 spin_unlock_irqrestore(&local->sta_lock, flags);
512}
275 513
276static inline int sta_info_buffer_expired(struct ieee80211_local *local, 514static inline int sta_info_buffer_expired(struct ieee80211_local *local,
277 struct sta_info *sta, 515 struct sta_info *sta,
@@ -299,6 +537,7 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
299{ 537{
300 unsigned long flags; 538 unsigned long flags;
301 struct sk_buff *skb; 539 struct sk_buff *skb;
540 struct ieee80211_sub_if_data *sdata;
302 DECLARE_MAC_BUF(mac); 541 DECLARE_MAC_BUF(mac);
303 542
304 if (skb_queue_empty(&sta->ps_tx_buf)) 543 if (skb_queue_empty(&sta->ps_tx_buf))
@@ -307,21 +546,23 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
307 for (;;) { 546 for (;;) {
308 spin_lock_irqsave(&sta->ps_tx_buf.lock, flags); 547 spin_lock_irqsave(&sta->ps_tx_buf.lock, flags);
309 skb = skb_peek(&sta->ps_tx_buf); 548 skb = skb_peek(&sta->ps_tx_buf);
310 if (sta_info_buffer_expired(local, sta, skb)) { 549 if (sta_info_buffer_expired(local, sta, skb))
311 skb = __skb_dequeue(&sta->ps_tx_buf); 550 skb = __skb_dequeue(&sta->ps_tx_buf);
312 if (skb_queue_empty(&sta->ps_tx_buf)) 551 else
313 sta->flags &= ~WLAN_STA_TIM;
314 } else
315 skb = NULL; 552 skb = NULL;
316 spin_unlock_irqrestore(&sta->ps_tx_buf.lock, flags); 553 spin_unlock_irqrestore(&sta->ps_tx_buf.lock, flags);
317 554
318 if (skb) { 555 if (!skb)
319 local->total_ps_buffered--;
320 printk(KERN_DEBUG "Buffered frame expired (STA "
321 "%s)\n", print_mac(mac, sta->addr));
322 dev_kfree_skb(skb);
323 } else
324 break; 556 break;
557
558 sdata = sta->sdata;
559 local->total_ps_buffered--;
560 printk(KERN_DEBUG "Buffered frame expired (STA "
561 "%s)\n", print_mac(mac, sta->addr));
562 dev_kfree_skb(skb);
563
564 if (skb_queue_empty(&sta->ps_tx_buf))
565 sta_info_clear_tim_bit(sta);
325 } 566 }
326} 567}
327 568
@@ -331,13 +572,10 @@ static void sta_info_cleanup(unsigned long data)
331 struct ieee80211_local *local = (struct ieee80211_local *) data; 572 struct ieee80211_local *local = (struct ieee80211_local *) data;
332 struct sta_info *sta; 573 struct sta_info *sta;
333 574
334 read_lock_bh(&local->sta_lock); 575 rcu_read_lock();
335 list_for_each_entry(sta, &local->sta_list, list) { 576 list_for_each_entry_rcu(sta, &local->sta_list, list)
336 __sta_info_get(sta);
337 sta_info_cleanup_expire_buffered(local, sta); 577 sta_info_cleanup_expire_buffered(local, sta);
338 sta_info_put(sta); 578 rcu_read_unlock();
339 }
340 read_unlock_bh(&local->sta_lock);
341 579
342 local->sta_cleanup.expires = 580 local->sta_cleanup.expires =
343 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); 581 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
@@ -345,38 +583,106 @@ static void sta_info_cleanup(unsigned long data)
345} 583}
346 584
347#ifdef CONFIG_MAC80211_DEBUGFS 585#ifdef CONFIG_MAC80211_DEBUGFS
348static void sta_info_debugfs_add_task(struct work_struct *work) 586/*
587 * See comment in __sta_info_unlink,
588 * caller must hold local->sta_lock.
589 */
590static void __sta_info_pin(struct sta_info *sta)
591{
592 WARN_ON(sta->pin_status != STA_INFO_PIN_STAT_NORMAL);
593 sta->pin_status = STA_INFO_PIN_STAT_PINNED;
594}
595
596/*
597 * See comment in __sta_info_unlink, returns sta if it
598 * needs to be destroyed.
599 */
600static struct sta_info *__sta_info_unpin(struct sta_info *sta)
601{
602 struct sta_info *ret = NULL;
603 unsigned long flags;
604
605 spin_lock_irqsave(&sta->local->sta_lock, flags);
606 WARN_ON(sta->pin_status != STA_INFO_PIN_STAT_DESTROY &&
607 sta->pin_status != STA_INFO_PIN_STAT_PINNED);
608 if (sta->pin_status == STA_INFO_PIN_STAT_DESTROY)
609 ret = sta;
610 sta->pin_status = STA_INFO_PIN_STAT_NORMAL;
611 spin_unlock_irqrestore(&sta->local->sta_lock, flags);
612
613 return ret;
614}
615
616static void sta_info_debugfs_add_work(struct work_struct *work)
349{ 617{
350 struct ieee80211_local *local = 618 struct ieee80211_local *local =
351 container_of(work, struct ieee80211_local, sta_debugfs_add); 619 container_of(work, struct ieee80211_local, sta_debugfs_add);
352 struct sta_info *sta, *tmp; 620 struct sta_info *sta, *tmp;
621 unsigned long flags;
353 622
623 /* We need to keep the RTNL across the whole pinned status. */
624 rtnl_lock();
354 while (1) { 625 while (1) {
355 sta = NULL; 626 sta = NULL;
356 read_lock_bh(&local->sta_lock); 627
628 spin_lock_irqsave(&local->sta_lock, flags);
357 list_for_each_entry(tmp, &local->sta_list, list) { 629 list_for_each_entry(tmp, &local->sta_list, list) {
358 if (!tmp->debugfs.dir) { 630 if (!tmp->debugfs.dir) {
359 sta = tmp; 631 sta = tmp;
360 __sta_info_get(sta); 632 __sta_info_pin(sta);
361 break; 633 break;
362 } 634 }
363 } 635 }
364 read_unlock_bh(&local->sta_lock); 636 spin_unlock_irqrestore(&local->sta_lock, flags);
365 637
366 if (!sta) 638 if (!sta)
367 break; 639 break;
368 640
369 ieee80211_sta_debugfs_add(sta); 641 ieee80211_sta_debugfs_add(sta);
370 rate_control_add_sta_debugfs(sta); 642 rate_control_add_sta_debugfs(sta);
371 sta_info_put(sta); 643
644 sta = __sta_info_unpin(sta);
645 sta_info_destroy(sta);
372 } 646 }
647 rtnl_unlock();
373} 648}
374#endif 649#endif
375 650
651static void __ieee80211_run_pending_flush(struct ieee80211_local *local)
652{
653 struct sta_info *sta;
654 unsigned long flags;
655
656 ASSERT_RTNL();
657
658 spin_lock_irqsave(&local->sta_lock, flags);
659 while (!list_empty(&local->sta_flush_list)) {
660 sta = list_first_entry(&local->sta_flush_list,
661 struct sta_info, list);
662 list_del(&sta->list);
663 spin_unlock_irqrestore(&local->sta_lock, flags);
664 sta_info_destroy(sta);
665 spin_lock_irqsave(&local->sta_lock, flags);
666 }
667 spin_unlock_irqrestore(&local->sta_lock, flags);
668}
669
670static void ieee80211_sta_flush_work(struct work_struct *work)
671{
672 struct ieee80211_local *local =
673 container_of(work, struct ieee80211_local, sta_flush_work);
674
675 rtnl_lock();
676 __ieee80211_run_pending_flush(local);
677 rtnl_unlock();
678}
679
376void sta_info_init(struct ieee80211_local *local) 680void sta_info_init(struct ieee80211_local *local)
377{ 681{
378 rwlock_init(&local->sta_lock); 682 spin_lock_init(&local->sta_lock);
379 INIT_LIST_HEAD(&local->sta_list); 683 INIT_LIST_HEAD(&local->sta_list);
684 INIT_LIST_HEAD(&local->sta_flush_list);
685 INIT_WORK(&local->sta_flush_work, ieee80211_sta_flush_work);
380 686
381 setup_timer(&local->sta_cleanup, sta_info_cleanup, 687 setup_timer(&local->sta_cleanup, sta_info_cleanup,
382 (unsigned long)local); 688 (unsigned long)local);
@@ -384,7 +690,7 @@ void sta_info_init(struct ieee80211_local *local)
384 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); 690 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
385 691
386#ifdef CONFIG_MAC80211_DEBUGFS 692#ifdef CONFIG_MAC80211_DEBUGFS
387 INIT_WORK(&local->sta_debugfs_add, sta_info_debugfs_add_task); 693 INIT_WORK(&local->sta_debugfs_add, sta_info_debugfs_add_work);
388#endif 694#endif
389} 695}
390 696
@@ -397,47 +703,89 @@ int sta_info_start(struct ieee80211_local *local)
397void sta_info_stop(struct ieee80211_local *local) 703void sta_info_stop(struct ieee80211_local *local)
398{ 704{
399 del_timer(&local->sta_cleanup); 705 del_timer(&local->sta_cleanup);
706 cancel_work_sync(&local->sta_flush_work);
707#ifdef CONFIG_MAC80211_DEBUGFS
708 /*
709 * Make sure the debugfs adding work isn't pending after this
710 * because we're about to be destroyed. It doesn't matter
711 * whether it ran or not since we're going to flush all STAs
712 * anyway.
713 */
714 cancel_work_sync(&local->sta_debugfs_add);
715#endif
716
717 rtnl_lock();
400 sta_info_flush(local, NULL); 718 sta_info_flush(local, NULL);
719 __ieee80211_run_pending_flush(local);
720 rtnl_unlock();
401} 721}
402 722
403void sta_info_remove_aid_ptr(struct sta_info *sta) 723/**
724 * sta_info_flush - flush matching STA entries from the STA table
725 *
726 * Returns the number of removed STA entries.
727 *
728 * @local: local interface data
729 * @sdata: matching rule for the net device (sta->dev) or %NULL to match all STAs
730 */
731int sta_info_flush(struct ieee80211_local *local,
732 struct ieee80211_sub_if_data *sdata)
404{ 733{
405 struct ieee80211_sub_if_data *sdata; 734 struct sta_info *sta, *tmp;
735 LIST_HEAD(tmp_list);
736 int ret = 0;
737 unsigned long flags;
406 738
407 if (sta->aid <= 0) 739 might_sleep();
408 return; 740 ASSERT_RTNL();
741
742 spin_lock_irqsave(&local->sta_lock, flags);
743 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
744 if (!sdata || sdata == sta->sdata) {
745 __sta_info_unlink(&sta);
746 if (sta) {
747 list_add_tail(&sta->list, &tmp_list);
748 ret++;
749 }
750 }
751 }
752 spin_unlock_irqrestore(&local->sta_lock, flags);
409 753
410 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); 754 list_for_each_entry_safe(sta, tmp, &tmp_list, list)
755 sta_info_destroy(sta);
411 756
412 if (sdata->local->ops->set_tim) 757 return ret;
413 sdata->local->ops->set_tim(local_to_hw(sdata->local),
414 sta->aid, 0);
415 if (sdata->bss)
416 __bss_tim_clear(sdata->bss, sta->aid);
417} 758}
418 759
419
420/** 760/**
421 * sta_info_flush - flush matching STA entries from the STA table 761 * sta_info_flush_delayed - flush matching STA entries from the STA table
422 * @local: local interface data 762 *
423 * @dev: matching rule for the net device (sta->dev) or %NULL to match all STAs 763 * This function unlinks all stations for a given interface and queues
764 * them for freeing. Note that the workqueue function scheduled here has
765 * to run before any new keys can be added to the system to avoid set_key()
766 * callback ordering issues.
767 *
768 * @sdata: the interface
424 */ 769 */
425void sta_info_flush(struct ieee80211_local *local, struct net_device *dev) 770void sta_info_flush_delayed(struct ieee80211_sub_if_data *sdata)
426{ 771{
772 struct ieee80211_local *local = sdata->local;
427 struct sta_info *sta, *tmp; 773 struct sta_info *sta, *tmp;
428 LIST_HEAD(tmp_list); 774 unsigned long flags;
429 775 bool work = false;
430 write_lock_bh(&local->sta_lock); 776
431 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) 777 spin_lock_irqsave(&local->sta_lock, flags);
432 if (!dev || dev == sta->dev) { 778 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
433 __sta_info_get(sta); 779 if (sdata == sta->sdata) {
434 sta_info_remove(sta); 780 __sta_info_unlink(&sta);
435 list_add_tail(&sta->list, &tmp_list); 781 if (sta) {
782 list_add_tail(&sta->list,
783 &local->sta_flush_list);
784 work = true;
785 }
436 } 786 }
437 write_unlock_bh(&local->sta_lock);
438
439 list_for_each_entry_safe(sta, tmp, &tmp_list, list) {
440 sta_info_free(sta);
441 sta_info_put(sta);
442 } 787 }
788 if (work)
789 schedule_work(&local->sta_flush_work);
790 spin_unlock_irqrestore(&local->sta_lock, flags);
443} 791}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 96fe3ed95038..f8c95bc9659c 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -12,160 +12,293 @@
12#include <linux/list.h> 12#include <linux/list.h>
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/if_ether.h> 14#include <linux/if_ether.h>
15#include <linux/kref.h> 15#include "key.h"
16#include "ieee80211_key.h" 16
17 17/**
18/* Stations flags (struct sta_info::flags) */ 18 * enum ieee80211_sta_info_flags - Stations flags
19#define WLAN_STA_AUTH BIT(0) 19 *
20#define WLAN_STA_ASSOC BIT(1) 20 * These flags are used with &struct sta_info's @flags member.
21#define WLAN_STA_PS BIT(2) 21 *
22#define WLAN_STA_TIM BIT(3) /* TIM bit is on for PS stations */ 22 * @WLAN_STA_AUTH: Station is authenticated.
23#define WLAN_STA_PERM BIT(4) /* permanent; do not remove entry on expiration */ 23 * @WLAN_STA_ASSOC: Station is associated.
24#define WLAN_STA_AUTHORIZED BIT(5) /* If 802.1X is used, this flag is 24 * @WLAN_STA_PS: Station is in power-save mode
25 * controlling whether STA is authorized to 25 * @WLAN_STA_AUTHORIZED: Station is authorized to send/receive traffic.
26 * send and receive non-IEEE 802.1X frames 26 * This bit is always checked so needs to be enabled for all stations
27 */ 27 * when virtual port control is not in use.
28#define WLAN_STA_SHORT_PREAMBLE BIT(7) 28 * @WLAN_STA_SHORT_PREAMBLE: Station is capable of receiving short-preamble
29/* whether this is an AP that we are associated with as a client */ 29 * frames.
30#define WLAN_STA_ASSOC_AP BIT(8) 30 * @WLAN_STA_ASSOC_AP: We're associated to that station, it is an AP.
31#define WLAN_STA_WME BIT(9) 31 * @WLAN_STA_WME: Station is a QoS-STA.
32#define WLAN_STA_WDS BIT(27) 32 * @WLAN_STA_WDS: Station is one of our WDS peers.
33 * @WLAN_STA_PSPOLL: Station has just PS-polled us.
34 * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the
35 * IEEE80211_TXCTL_CLEAR_PS_FILT control flag) when the next
36 * frame to this station is transmitted.
37 */
38enum ieee80211_sta_info_flags {
39 WLAN_STA_AUTH = 1<<0,
40 WLAN_STA_ASSOC = 1<<1,
41 WLAN_STA_PS = 1<<2,
42 WLAN_STA_AUTHORIZED = 1<<3,
43 WLAN_STA_SHORT_PREAMBLE = 1<<4,
44 WLAN_STA_ASSOC_AP = 1<<5,
45 WLAN_STA_WME = 1<<6,
46 WLAN_STA_WDS = 1<<7,
47 WLAN_STA_PSPOLL = 1<<8,
48 WLAN_STA_CLEAR_PS_FILT = 1<<9,
49};
33 50
34#define STA_TID_NUM 16 51#define STA_TID_NUM 16
35#define ADDBA_RESP_INTERVAL HZ 52#define ADDBA_RESP_INTERVAL HZ
53#define HT_AGG_MAX_RETRIES (0x3)
36 54
37#define HT_AGG_STATE_INITIATOR_SHIFT (4) 55#define HT_AGG_STATE_INITIATOR_SHIFT (4)
38 56
57#define HT_ADDBA_REQUESTED_MSK BIT(0)
58#define HT_ADDBA_DRV_READY_MSK BIT(1)
59#define HT_ADDBA_RECEIVED_MSK BIT(2)
39#define HT_AGG_STATE_REQ_STOP_BA_MSK BIT(3) 60#define HT_AGG_STATE_REQ_STOP_BA_MSK BIT(3)
40 61#define HT_AGG_STATE_INITIATOR_MSK BIT(HT_AGG_STATE_INITIATOR_SHIFT)
41#define HT_AGG_STATE_IDLE (0x0) 62#define HT_AGG_STATE_IDLE (0x0)
42#define HT_AGG_STATE_OPERATIONAL (0x7) 63#define HT_AGG_STATE_OPERATIONAL (HT_ADDBA_REQUESTED_MSK | \
64 HT_ADDBA_DRV_READY_MSK | \
65 HT_ADDBA_RECEIVED_MSK)
66#define HT_AGG_STATE_DEBUGFS_CTL BIT(7)
43 67
44/** 68/**
45 * struct tid_ampdu_rx - TID aggregation information (Rx). 69 * struct tid_ampdu_tx - TID aggregation information (Tx).
46 * 70 *
47 * @state: TID's state in session state machine. 71 * @addba_resp_timer: timer for peer's response to addba request
72 * @ssn: Starting Sequence Number expected to be aggregated.
48 * @dialog_token: dialog token for aggregation session 73 * @dialog_token: dialog token for aggregation session
74 */
75struct tid_ampdu_tx {
76 struct timer_list addba_resp_timer;
77 u16 ssn;
78 u8 dialog_token;
79};
80
81/**
82 * struct tid_ampdu_rx - TID aggregation information (Rx).
83 *
84 * @reorder_buf: buffer to reorder incoming aggregated MPDUs
85 * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
86 * @head_seq_num: head sequence number in reordering buffer.
87 * @stored_mpdu_num: number of MPDUs in reordering buffer
49 * @ssn: Starting Sequence Number expected to be aggregated. 88 * @ssn: Starting Sequence Number expected to be aggregated.
50 * @buf_size: buffer size for incoming A-MPDUs 89 * @buf_size: buffer size for incoming A-MPDUs
51 * @timeout: reset timer value. 90 * @timeout: reset timer value.
52 * @head_seq_num: head sequence number in reordering buffer. 91 * @dialog_token: dialog token for aggregation session
53 * @stored_mpdu_num: number of MPDUs in reordering buffer
54 * @reorder_buf: buffer to reorder incoming aggregated MPDUs
55 * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
56 */ 92 */
57struct tid_ampdu_rx { 93struct tid_ampdu_rx {
58 u8 state; 94 struct sk_buff **reorder_buf;
59 u8 dialog_token; 95 struct timer_list session_timer;
96 u16 head_seq_num;
97 u16 stored_mpdu_num;
60 u16 ssn; 98 u16 ssn;
61 u16 buf_size; 99 u16 buf_size;
62 u16 timeout; 100 u16 timeout;
63 u16 head_seq_num; 101 u8 dialog_token;
64 u16 stored_mpdu_num; 102};
65 struct sk_buff **reorder_buf; 103
66 struct timer_list session_timer; 104/**
105 * enum plink_state - state of a mesh peer link finite state machine
106 *
107 * @PLINK_LISTEN: initial state, considered the implicit state of non existant
108 * mesh peer links
109 * @PLINK_OPN_SNT: mesh plink open frame has been sent to this mesh peer
110 * @PLINK_OPN_RCVD: mesh plink open frame has been received from this mesh peer
111 * @PLINK_CNF_RCVD: mesh plink confirm frame has been received from this mesh
112 * peer
113 * @PLINK_ESTAB: mesh peer link is established
114 * @PLINK_HOLDING: mesh peer link is being closed or cancelled
115 * @PLINK_BLOCKED: all frames transmitted from this mesh plink are discarded
116 */
117enum plink_state {
118 PLINK_LISTEN,
119 PLINK_OPN_SNT,
120 PLINK_OPN_RCVD,
121 PLINK_CNF_RCVD,
122 PLINK_ESTAB,
123 PLINK_HOLDING,
124 PLINK_BLOCKED
67}; 125};
68 126
69/** 127/**
70 * struct sta_ampdu_mlme - STA aggregation information. 128 * struct sta_ampdu_mlme - STA aggregation information.
71 * 129 *
72 * @tid_agg_info_rx: aggregation info for Rx per TID 130 * @tid_state_rx: TID's state in Rx session state machine.
131 * @tid_rx: aggregation info for Rx per TID
73 * @ampdu_rx: for locking sections in aggregation Rx flow 132 * @ampdu_rx: for locking sections in aggregation Rx flow
133 * @tid_state_tx: TID's state in Tx session state machine.
134 * @tid_tx: aggregation info for Tx per TID
135 * @addba_req_num: number of times addBA request has been sent.
136 * @ampdu_tx: for locking sectionsi in aggregation Tx flow
137 * @dialog_token_allocator: dialog token enumerator for each new session;
74 */ 138 */
75struct sta_ampdu_mlme { 139struct sta_ampdu_mlme {
76 struct tid_ampdu_rx tid_rx[STA_TID_NUM]; 140 /* rx */
141 u8 tid_state_rx[STA_TID_NUM];
142 struct tid_ampdu_rx *tid_rx[STA_TID_NUM];
77 spinlock_t ampdu_rx; 143 spinlock_t ampdu_rx;
144 /* tx */
145 u8 tid_state_tx[STA_TID_NUM];
146 struct tid_ampdu_tx *tid_tx[STA_TID_NUM];
147 u8 addba_req_num[STA_TID_NUM];
148 spinlock_t ampdu_tx;
149 u8 dialog_token_allocator;
78}; 150};
79 151
152
153/* see __sta_info_unlink */
154#define STA_INFO_PIN_STAT_NORMAL 0
155#define STA_INFO_PIN_STAT_PINNED 1
156#define STA_INFO_PIN_STAT_DESTROY 2
157
158/**
159 * struct sta_info - STA information
160 *
161 * This structure collects information about a station that
162 * mac80211 is communicating with.
163 *
164 * @list: global linked list entry
165 * @hnext: hash table linked list pointer
166 * @local: pointer to the global information
167 * @addr: MAC address of this STA
168 * @aid: STA's unique AID (1..2007, 0 = not assigned yet),
169 * only used in AP (and IBSS?) mode
170 * @flags: STA flags, see &enum ieee80211_sta_info_flags
171 * @ps_tx_buf: buffer of frames to transmit to this station
172 * when it leaves power saving state
173 * @tx_filtered: buffer of frames we already tried to transmit
174 * but were filtered by hardware due to STA having entered
175 * power saving state
176 * @rx_packets: Number of MSDUs received from this STA
177 * @rx_bytes: Number of bytes received from this STA
178 * @supp_rates: Bitmap of supported rates (per band)
179 * @ht_info: HT capabilities of this STA
180 */
80struct sta_info { 181struct sta_info {
81 struct kref kref; 182 /* General information, mostly static */
82 struct list_head list; 183 struct list_head list;
83 struct sta_info *hnext; /* next entry in hash table list */ 184 struct sta_info *hnext;
84
85 struct ieee80211_local *local; 185 struct ieee80211_local *local;
86 186 struct ieee80211_sub_if_data *sdata;
87 u8 addr[ETH_ALEN];
88 u16 aid; /* STA's unique AID (1..2007), 0 = not yet assigned */
89 u32 flags; /* WLAN_STA_ */
90
91 struct sk_buff_head ps_tx_buf; /* buffer of TX frames for station in
92 * power saving state */
93 int pspoll; /* whether STA has send a PS Poll frame */
94 struct sk_buff_head tx_filtered; /* buffer of TX frames that were
95 * already given to low-level driver,
96 * but were filtered */
97 int clear_dst_mask;
98
99 unsigned long rx_packets, tx_packets; /* number of RX/TX MSDUs */
100 unsigned long rx_bytes, tx_bytes;
101 unsigned long tx_retry_failed, tx_retry_count;
102 unsigned long tx_filtered_count;
103
104 unsigned int wep_weak_iv_count; /* number of RX frames with weak IV */
105
106 unsigned long last_rx;
107 u32 supp_rates; /* bitmap of supported rates in local->curr_rates */
108 int txrate; /* index in local->curr_rates */
109 int last_txrate; /* last rate used to send a frame to this STA */
110 int last_nonerp_idx;
111
112 struct net_device *dev; /* which net device is this station associated
113 * to */
114
115 struct ieee80211_key *key; 187 struct ieee80211_key *key;
116
117 u32 tx_num_consecutive_failures;
118 u32 tx_num_mpdu_ok;
119 u32 tx_num_mpdu_fail;
120
121 struct rate_control_ref *rate_ctrl; 188 struct rate_control_ref *rate_ctrl;
122 void *rate_ctrl_priv; 189 void *rate_ctrl_priv;
190 struct ieee80211_ht_info ht_info;
191 u64 supp_rates[IEEE80211_NUM_BANDS];
192 u8 addr[ETH_ALEN];
193 u16 aid;
194 u16 listen_interval;
123 195
124 /* last received seq/frag number from this STA (per RX queue) */ 196 /*
125 __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; 197 * for use by the internal lifetime management,
198 * see __sta_info_unlink
199 */
200 u8 pin_status;
201
202 /* frequently updated information, needs locking? */
203 u32 flags;
204
205 /*
206 * STA powersave frame queues, no more than the internal
207 * locking required.
208 */
209 struct sk_buff_head ps_tx_buf;
210 struct sk_buff_head tx_filtered;
211
212 /* Updated from RX path only, no locking requirements */
213 unsigned long rx_packets, rx_bytes;
214 unsigned long wep_weak_iv_count;
215 unsigned long last_rx;
126 unsigned long num_duplicates; /* number of duplicate frames received 216 unsigned long num_duplicates; /* number of duplicate frames received
127 * from this STA */ 217 * from this STA */
128 unsigned long tx_fragments; /* number of transmitted MPDUs */
129 unsigned long rx_fragments; /* number of received MPDUs */ 218 unsigned long rx_fragments; /* number of received MPDUs */
130 unsigned long rx_dropped; /* number of dropped MPDUs from this STA */ 219 unsigned long rx_dropped; /* number of dropped MPDUs from this STA */
131
132 int last_rssi; /* RSSI of last received frame from this STA */ 220 int last_rssi; /* RSSI of last received frame from this STA */
133 int last_signal; /* signal of last received frame from this STA */ 221 int last_signal; /* signal of last received frame from this STA */
134 int last_noise; /* noise of last received frame from this STA */ 222 int last_noise; /* noise of last received frame from this STA */
135 int last_ack_rssi[3]; /* RSSI of last received ACKs from this STA */ 223 /* last received seq/frag number from this STA (per RX queue) */
136 unsigned long last_ack; 224 __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES];
137 int channel_use;
138 int channel_use_raw;
139
140#ifdef CONFIG_MAC80211_DEBUG_COUNTERS 225#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
141 unsigned int wme_rx_queue[NUM_RX_DATA_QUEUES]; 226 unsigned int wme_rx_queue[NUM_RX_DATA_QUEUES];
227#endif
228
229 /* Updated from TX status path only, no locking requirements */
230 unsigned long tx_filtered_count;
231 unsigned long tx_retry_failed, tx_retry_count;
232 /* TODO: update in generic code not rate control? */
233 u32 tx_num_consecutive_failures;
234 u32 tx_num_mpdu_ok;
235 u32 tx_num_mpdu_fail;
236 /* moving percentage of failed MSDUs */
237 unsigned int fail_avg;
238
239 /* Updated from TX path only, no locking requirements */
240 unsigned long tx_packets; /* number of RX/TX MSDUs */
241 unsigned long tx_bytes;
242 unsigned long tx_fragments; /* number of transmitted MPDUs */
243 int txrate_idx;
244 int last_txrate_idx;
245#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
142 unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES]; 246 unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES];
143#endif /* CONFIG_MAC80211_DEBUG_COUNTERS */ 247#endif
144 248
145 u16 listen_interval; 249 /* Debug counters, no locking doesn't matter */
250 int channel_use;
251 int channel_use_raw;
146 252
147 struct ieee80211_ht_info ht_info; /* 802.11n HT capabilities 253 /*
148 of this STA */ 254 * Aggregation information, comes with own locking.
255 */
149 struct sta_ampdu_mlme ampdu_mlme; 256 struct sta_ampdu_mlme ampdu_mlme;
150 u8 timer_to_tid[STA_TID_NUM]; /* convert timer id to tid */ 257 u8 timer_to_tid[STA_TID_NUM]; /* identity mapping to ID timers */
258 u8 tid_to_tx_q[STA_TID_NUM]; /* map tid to tx queue */
259
260#ifdef CONFIG_MAC80211_MESH
261 /*
262 * Mesh peer link attributes
263 * TODO: move to a sub-structure that is referenced with pointer?
264 */
265 __le16 llid; /* Local link ID */
266 __le16 plid; /* Peer link ID */
267 __le16 reason; /* Cancel reason on PLINK_HOLDING state */
268 u8 plink_retries; /* Retries in establishment */
269 bool ignore_plink_timer;
270 enum plink_state plink_state;
271 u32 plink_timeout;
272 struct timer_list plink_timer;
273 spinlock_t plink_lock; /* For peer_state reads / updates and other
274 updates in the structure. Ensures robust
275 transitions for the peerlink FSM */
276#endif
151 277
152#ifdef CONFIG_MAC80211_DEBUGFS 278#ifdef CONFIG_MAC80211_DEBUGFS
153 struct sta_info_debugfsdentries { 279 struct sta_info_debugfsdentries {
154 struct dentry *dir; 280 struct dentry *dir;
155 struct dentry *flags; 281 struct dentry *flags;
156 struct dentry *num_ps_buf_frames; 282 struct dentry *num_ps_buf_frames;
157 struct dentry *last_ack_rssi;
158 struct dentry *last_ack_ms;
159 struct dentry *inactive_ms; 283 struct dentry *inactive_ms;
160 struct dentry *last_seq_ctrl; 284 struct dentry *last_seq_ctrl;
161#ifdef CONFIG_MAC80211_DEBUG_COUNTERS 285#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
162 struct dentry *wme_rx_queue; 286 struct dentry *wme_rx_queue;
163 struct dentry *wme_tx_queue; 287 struct dentry *wme_tx_queue;
164#endif 288#endif
289 struct dentry *agg_status;
165 } debugfs; 290 } debugfs;
166#endif 291#endif
167}; 292};
168 293
294static inline enum plink_state sta_plink_state(struct sta_info *sta)
295{
296#ifdef CONFIG_MAC80211_MESH
297 return sta->plink_state;
298#endif
299 return PLINK_LISTEN;
300}
301
169 302
170/* Maximum number of concurrently registered stations */ 303/* Maximum number of concurrently registered stations */
171#define MAX_STA_COUNT 2007 304#define MAX_STA_COUNT 2007
@@ -185,22 +318,46 @@ struct sta_info {
185 */ 318 */
186#define STA_INFO_CLEANUP_INTERVAL (10 * HZ) 319#define STA_INFO_CLEANUP_INTERVAL (10 * HZ)
187 320
188static inline void __sta_info_get(struct sta_info *sta) 321/*
189{ 322 * Get a STA info, must have be under RCU read lock.
190 kref_get(&sta->kref); 323 */
191} 324struct sta_info *sta_info_get(struct ieee80211_local *local, u8 *addr);
325/*
326 * Get STA info by index, BROKEN!
327 */
328struct sta_info *sta_info_get_by_idx(struct ieee80211_local *local, int idx,
329 struct net_device *dev);
330/*
331 * Create a new STA info, caller owns returned structure
332 * until sta_info_insert().
333 */
334struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
335 u8 *addr, gfp_t gfp);
336/*
337 * Insert STA info into hash table/list, returns zero or a
338 * -EEXIST if (if the same MAC address is already present).
339 *
340 * Calling this without RCU protection makes the caller
341 * relinquish its reference to @sta.
342 */
343int sta_info_insert(struct sta_info *sta);
344/*
345 * Unlink a STA info from the hash table/list.
346 * This can NULL the STA pointer if somebody else
347 * has already unlinked it.
348 */
349void sta_info_unlink(struct sta_info **sta);
350void __sta_info_unlink(struct sta_info **sta);
351
352void sta_info_destroy(struct sta_info *sta);
353void sta_info_set_tim_bit(struct sta_info *sta);
354void sta_info_clear_tim_bit(struct sta_info *sta);
192 355
193struct sta_info * sta_info_get(struct ieee80211_local *local, u8 *addr);
194int sta_info_min_txrate_get(struct ieee80211_local *local);
195void sta_info_put(struct sta_info *sta);
196struct sta_info * sta_info_add(struct ieee80211_local *local,
197 struct net_device *dev, u8 *addr, gfp_t gfp);
198void sta_info_remove(struct sta_info *sta);
199void sta_info_free(struct sta_info *sta);
200void sta_info_init(struct ieee80211_local *local); 356void sta_info_init(struct ieee80211_local *local);
201int sta_info_start(struct ieee80211_local *local); 357int sta_info_start(struct ieee80211_local *local);
202void sta_info_stop(struct ieee80211_local *local); 358void sta_info_stop(struct ieee80211_local *local);
203void sta_info_remove_aid_ptr(struct sta_info *sta); 359int sta_info_flush(struct ieee80211_local *local,
204void sta_info_flush(struct ieee80211_local *local, struct net_device *dev); 360 struct ieee80211_sub_if_data *sdata);
361void sta_info_flush_delayed(struct ieee80211_sub_if_data *sdata);
205 362
206#endif /* STA_INFO_H */ 363#endif /* STA_INFO_H */
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 3abe194e4d55..dddbfd60f351 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -12,7 +12,7 @@
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13 13
14#include <net/mac80211.h> 14#include <net/mac80211.h>
15#include "ieee80211_key.h" 15#include "key.h"
16#include "tkip.h" 16#include "tkip.h"
17#include "wep.h" 17#include "wep.h"
18 18
@@ -214,6 +214,59 @@ void ieee80211_tkip_gen_rc4key(struct ieee80211_key *key, u8 *ta,
214 key->u.tkip.iv16, rc4key); 214 key->u.tkip.iv16, rc4key);
215} 215}
216 216
217void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
218 struct sk_buff *skb, enum ieee80211_tkip_key_type type,
219 u8 *outkey)
220{
221 struct ieee80211_key *key = (struct ieee80211_key *)
222 container_of(keyconf, struct ieee80211_key, conf);
223 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
224 u8 *data = (u8 *) hdr;
225 u16 fc = le16_to_cpu(hdr->frame_control);
226 int hdr_len = ieee80211_get_hdrlen(fc);
227 u8 *ta = hdr->addr2;
228 u16 iv16;
229 u32 iv32;
230
231 iv16 = data[hdr_len] << 8;
232 iv16 += data[hdr_len + 2];
233 iv32 = data[hdr_len + 4] +
234 (data[hdr_len + 5] >> 8) +
235 (data[hdr_len + 6] >> 16) +
236 (data[hdr_len + 7] >> 24);
237
238#ifdef CONFIG_TKIP_DEBUG
239 printk(KERN_DEBUG "TKIP encrypt: iv16 = 0x%04x, iv32 = 0x%08x\n",
240 iv16, iv32);
241
242 if (iv32 != key->u.tkip.iv32) {
243 printk(KERN_DEBUG "skb: iv32 = 0x%08x key: iv32 = 0x%08x\n",
244 iv32, key->u.tkip.iv32);
245 printk(KERN_DEBUG "Wrap around of iv16 in the middle of a "
246 "fragmented packet\n");
247 }
248#endif /* CONFIG_TKIP_DEBUG */
249
250 /* Update the p1k only when the iv16 in the packet wraps around, this
251 * might occur after the wrap around of iv16 in the key in case of
252 * fragmented packets. */
253 if (iv16 == 0 || !key->u.tkip.tx_initialized) {
254 /* IV16 wrapped around - perform TKIP phase 1 */
255 tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY],
256 iv32, key->u.tkip.p1k);
257 key->u.tkip.tx_initialized = 1;
258 }
259
260 if (type == IEEE80211_TKIP_P1_KEY) {
261 memcpy(outkey, key->u.tkip.p1k, sizeof(u16) * 5);
262 return;
263 }
264
265 tkip_mixing_phase2(key->u.tkip.p1k,
266 &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], iv16, outkey);
267}
268EXPORT_SYMBOL(ieee80211_get_tkip_key);
269
217/* Encrypt packet payload with TKIP using @key. @pos is a pointer to the 270/* Encrypt packet payload with TKIP using @key. @pos is a pointer to the
218 * beginning of the buffer containing payload. This payload must include 271 * beginning of the buffer containing payload. This payload must include
219 * headroom of eight octets for IV and Ext. IV and taildroom of four octets 272 * headroom of eight octets for IV and Ext. IV and taildroom of four octets
@@ -238,7 +291,7 @@ void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
238int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, 291int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
239 struct ieee80211_key *key, 292 struct ieee80211_key *key,
240 u8 *payload, size_t payload_len, u8 *ta, 293 u8 *payload, size_t payload_len, u8 *ta,
241 int only_iv, int queue, 294 u8 *ra, int only_iv, int queue,
242 u32 *out_iv32, u16 *out_iv16) 295 u32 *out_iv32, u16 *out_iv16)
243{ 296{
244 u32 iv32; 297 u32 iv32;
@@ -315,6 +368,19 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
315 printk("\n"); 368 printk("\n");
316 } 369 }
317#endif /* CONFIG_TKIP_DEBUG */ 370#endif /* CONFIG_TKIP_DEBUG */
371 if (key->local->ops->update_tkip_key &&
372 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
373 u8 bcast[ETH_ALEN] =
374 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
375 u8 *sta_addr = key->sta->addr;
376
377 if (is_multicast_ether_addr(ra))
378 sta_addr = bcast;
379
380 key->local->ops->update_tkip_key(
381 local_to_hw(key->local), &key->conf,
382 sta_addr, iv32, key->u.tkip.p1k_rx[queue]);
383 }
318 } 384 }
319 385
320 tkip_mixing_phase2(key->u.tkip.p1k_rx[queue], 386 tkip_mixing_phase2(key->u.tkip.p1k_rx[queue],
diff --git a/net/mac80211/tkip.h b/net/mac80211/tkip.h
index 73d8ef2a93b0..b7c2ee763d9d 100644
--- a/net/mac80211/tkip.h
+++ b/net/mac80211/tkip.h
@@ -11,7 +11,7 @@
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/crypto.h> 13#include <linux/crypto.h>
14#include "ieee80211_key.h" 14#include "key.h"
15 15
16u8 * ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, 16u8 * ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key,
17 u8 iv0, u8 iv1, u8 iv2); 17 u8 iv0, u8 iv1, u8 iv2);
@@ -31,7 +31,7 @@ enum {
31int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, 31int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
32 struct ieee80211_key *key, 32 struct ieee80211_key *key,
33 u8 *payload, size_t payload_len, u8 *ta, 33 u8 *payload, size_t payload_len, u8 *ta,
34 int only_iv, int queue, 34 u8 *ra, int only_iv, int queue,
35 u32 *out_iv32, u16 *out_iv16); 35 u32 *out_iv32, u16 *out_iv16);
36 36
37#endif /* TKIP_H */ 37#endif /* TKIP_H */
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 67b509edd431..f35eaea98e73 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -25,11 +25,12 @@
25#include <asm/unaligned.h> 25#include <asm/unaligned.h>
26 26
27#include "ieee80211_i.h" 27#include "ieee80211_i.h"
28#include "ieee80211_led.h" 28#include "led.h"
29#include "mesh.h"
29#include "wep.h" 30#include "wep.h"
30#include "wpa.h" 31#include "wpa.h"
31#include "wme.h" 32#include "wme.h"
32#include "ieee80211_rate.h" 33#include "rate.h"
33 34
34#define IEEE80211_TX_OK 0 35#define IEEE80211_TX_OK 0
35#define IEEE80211_TX_AGAIN 1 36#define IEEE80211_TX_AGAIN 1
@@ -86,15 +87,19 @@ static inline void ieee80211_dump_frame(const char *ifname, const char *title,
86} 87}
87#endif /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */ 88#endif /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */
88 89
89static u16 ieee80211_duration(struct ieee80211_txrx_data *tx, int group_addr, 90static u16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
90 int next_frag_len) 91 int next_frag_len)
91{ 92{
92 int rate, mrate, erp, dur, i; 93 int rate, mrate, erp, dur, i;
93 struct ieee80211_rate *txrate = tx->u.tx.rate; 94 struct ieee80211_rate *txrate = tx->rate;
94 struct ieee80211_local *local = tx->local; 95 struct ieee80211_local *local = tx->local;
95 struct ieee80211_hw_mode *mode = tx->u.tx.mode; 96 struct ieee80211_supported_band *sband;
96 97
97 erp = txrate->flags & IEEE80211_RATE_ERP; 98 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
99
100 erp = 0;
101 if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
102 erp = txrate->flags & IEEE80211_RATE_ERP_G;
98 103
99 /* 104 /*
100 * data and mgmt (except PS Poll): 105 * data and mgmt (except PS Poll):
@@ -150,20 +155,36 @@ static u16 ieee80211_duration(struct ieee80211_txrx_data *tx, int group_addr,
150 * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps 155 * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps
151 */ 156 */
152 rate = -1; 157 rate = -1;
153 mrate = 10; /* use 1 Mbps if everything fails */ 158 /* use lowest available if everything fails */
154 for (i = 0; i < mode->num_rates; i++) { 159 mrate = sband->bitrates[0].bitrate;
155 struct ieee80211_rate *r = &mode->rates[i]; 160 for (i = 0; i < sband->n_bitrates; i++) {
156 if (r->rate > txrate->rate) 161 struct ieee80211_rate *r = &sband->bitrates[i];
157 break;
158 162
159 if (IEEE80211_RATE_MODULATION(txrate->flags) != 163 if (r->bitrate > txrate->bitrate)
160 IEEE80211_RATE_MODULATION(r->flags)) 164 break;
161 continue;
162 165
163 if (r->flags & IEEE80211_RATE_BASIC) 166 if (tx->sdata->basic_rates & BIT(i))
164 rate = r->rate; 167 rate = r->bitrate;
165 else if (r->flags & IEEE80211_RATE_MANDATORY) 168
166 mrate = r->rate; 169 switch (sband->band) {
170 case IEEE80211_BAND_2GHZ: {
171 u32 flag;
172 if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
173 flag = IEEE80211_RATE_MANDATORY_G;
174 else
175 flag = IEEE80211_RATE_MANDATORY_B;
176 if (r->flags & flag)
177 mrate = r->bitrate;
178 break;
179 }
180 case IEEE80211_BAND_5GHZ:
181 if (r->flags & IEEE80211_RATE_MANDATORY_A)
182 mrate = r->bitrate;
183 break;
184 case IEEE80211_NUM_BANDS:
185 WARN_ON(1);
186 break;
187 }
167 } 188 }
168 if (rate == -1) { 189 if (rate == -1) {
169 /* No matching basic rate found; use highest suitable mandatory 190 /* No matching basic rate found; use highest suitable mandatory
@@ -184,7 +205,7 @@ static u16 ieee80211_duration(struct ieee80211_txrx_data *tx, int group_addr,
184 dur *= 2; /* ACK + SIFS */ 205 dur *= 2; /* ACK + SIFS */
185 /* next fragment */ 206 /* next fragment */
186 dur += ieee80211_frame_duration(local, next_frag_len, 207 dur += ieee80211_frame_duration(local, next_frag_len,
187 txrate->rate, erp, 208 txrate->bitrate, erp,
188 tx->sdata->bss_conf.use_short_preamble); 209 tx->sdata->bss_conf.use_short_preamble);
189 } 210 }
190 211
@@ -212,8 +233,8 @@ static int inline is_ieee80211_device(struct net_device *dev,
212 233
213/* tx handlers */ 234/* tx handlers */
214 235
215static ieee80211_txrx_result 236static ieee80211_tx_result
216ieee80211_tx_h_check_assoc(struct ieee80211_txrx_data *tx) 237ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
217{ 238{
218#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 239#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
219 struct sk_buff *skb = tx->skb; 240 struct sk_buff *skb = tx->skb;
@@ -221,20 +242,23 @@ ieee80211_tx_h_check_assoc(struct ieee80211_txrx_data *tx)
221#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 242#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
222 u32 sta_flags; 243 u32 sta_flags;
223 244
224 if (unlikely(tx->flags & IEEE80211_TXRXD_TX_INJECTED)) 245 if (unlikely(tx->flags & IEEE80211_TX_INJECTED))
225 return TXRX_CONTINUE; 246 return TX_CONTINUE;
226 247
227 if (unlikely(tx->local->sta_sw_scanning) && 248 if (unlikely(tx->local->sta_sw_scanning) &&
228 ((tx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || 249 ((tx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT ||
229 (tx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PROBE_REQ)) 250 (tx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PROBE_REQ))
230 return TXRX_DROP; 251 return TX_DROP;
231 252
232 if (tx->flags & IEEE80211_TXRXD_TXPS_BUFFERED) 253 if (tx->sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT)
233 return TXRX_CONTINUE; 254 return TX_CONTINUE;
255
256 if (tx->flags & IEEE80211_TX_PS_BUFFERED)
257 return TX_CONTINUE;
234 258
235 sta_flags = tx->sta ? tx->sta->flags : 0; 259 sta_flags = tx->sta ? tx->sta->flags : 0;
236 260
237 if (likely(tx->flags & IEEE80211_TXRXD_TXUNICAST)) { 261 if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
238 if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && 262 if (unlikely(!(sta_flags & WLAN_STA_ASSOC) &&
239 tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 263 tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
240 (tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) { 264 (tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) {
@@ -245,7 +269,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_txrx_data *tx)
245 tx->dev->name, print_mac(mac, hdr->addr1)); 269 tx->dev->name, print_mac(mac, hdr->addr1));
246#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 270#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
247 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc); 271 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
248 return TXRX_DROP; 272 return TX_DROP;
249 } 273 }
250 } else { 274 } else {
251 if (unlikely((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && 275 if (unlikely((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA &&
@@ -255,23 +279,23 @@ ieee80211_tx_h_check_assoc(struct ieee80211_txrx_data *tx)
255 * No associated STAs - no need to send multicast 279 * No associated STAs - no need to send multicast
256 * frames. 280 * frames.
257 */ 281 */
258 return TXRX_DROP; 282 return TX_DROP;
259 } 283 }
260 return TXRX_CONTINUE; 284 return TX_CONTINUE;
261 } 285 }
262 286
263 return TXRX_CONTINUE; 287 return TX_CONTINUE;
264} 288}
265 289
266static ieee80211_txrx_result 290static ieee80211_tx_result
267ieee80211_tx_h_sequence(struct ieee80211_txrx_data *tx) 291ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
268{ 292{
269 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 293 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
270 294
271 if (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)) >= 24) 295 if (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)) >= 24)
272 ieee80211_include_sequence(tx->sdata, hdr); 296 ieee80211_include_sequence(tx->sdata, hdr);
273 297
274 return TXRX_CONTINUE; 298 return TX_CONTINUE;
275} 299}
276 300
277/* This function is called whenever the AP is about to exceed the maximum limit 301/* This function is called whenever the AP is about to exceed the maximum limit
@@ -303,10 +327,8 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
303 } 327 }
304 total += skb_queue_len(&ap->ps_bc_buf); 328 total += skb_queue_len(&ap->ps_bc_buf);
305 } 329 }
306 rcu_read_unlock();
307 330
308 read_lock_bh(&local->sta_lock); 331 list_for_each_entry_rcu(sta, &local->sta_list, list) {
309 list_for_each_entry(sta, &local->sta_list, list) {
310 skb = skb_dequeue(&sta->ps_tx_buf); 332 skb = skb_dequeue(&sta->ps_tx_buf);
311 if (skb) { 333 if (skb) {
312 purged++; 334 purged++;
@@ -314,15 +336,16 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
314 } 336 }
315 total += skb_queue_len(&sta->ps_tx_buf); 337 total += skb_queue_len(&sta->ps_tx_buf);
316 } 338 }
317 read_unlock_bh(&local->sta_lock); 339
340 rcu_read_unlock();
318 341
319 local->total_ps_buffered = total; 342 local->total_ps_buffered = total;
320 printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n", 343 printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n",
321 wiphy_name(local->hw.wiphy), purged); 344 wiphy_name(local->hw.wiphy), purged);
322} 345}
323 346
324static ieee80211_txrx_result 347static ieee80211_tx_result
325ieee80211_tx_h_multicast_ps_buf(struct ieee80211_txrx_data *tx) 348ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
326{ 349{
327 /* 350 /*
328 * broadcast/multicast frame 351 * broadcast/multicast frame
@@ -334,11 +357,11 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_txrx_data *tx)
334 357
335 /* not AP/IBSS or ordered frame */ 358 /* not AP/IBSS or ordered frame */
336 if (!tx->sdata->bss || (tx->fc & IEEE80211_FCTL_ORDER)) 359 if (!tx->sdata->bss || (tx->fc & IEEE80211_FCTL_ORDER))
337 return TXRX_CONTINUE; 360 return TX_CONTINUE;
338 361
339 /* no stations in PS mode */ 362 /* no stations in PS mode */
340 if (!atomic_read(&tx->sdata->bss->num_sta_ps)) 363 if (!atomic_read(&tx->sdata->bss->num_sta_ps))
341 return TXRX_CONTINUE; 364 return TX_CONTINUE;
342 365
343 /* buffered in mac80211 */ 366 /* buffered in mac80211 */
344 if (tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING) { 367 if (tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING) {
@@ -355,17 +378,17 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_txrx_data *tx)
355 } else 378 } else
356 tx->local->total_ps_buffered++; 379 tx->local->total_ps_buffered++;
357 skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb); 380 skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb);
358 return TXRX_QUEUED; 381 return TX_QUEUED;
359 } 382 }
360 383
361 /* buffered in hardware */ 384 /* buffered in hardware */
362 tx->u.tx.control->flags |= IEEE80211_TXCTL_SEND_AFTER_DTIM; 385 tx->control->flags |= IEEE80211_TXCTL_SEND_AFTER_DTIM;
363 386
364 return TXRX_CONTINUE; 387 return TX_CONTINUE;
365} 388}
366 389
367static ieee80211_txrx_result 390static ieee80211_tx_result
368ieee80211_tx_h_unicast_ps_buf(struct ieee80211_txrx_data *tx) 391ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
369{ 392{
370 struct sta_info *sta = tx->sta; 393 struct sta_info *sta = tx->sta;
371 DECLARE_MAC_BUF(mac); 394 DECLARE_MAC_BUF(mac);
@@ -373,9 +396,10 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_txrx_data *tx)
373 if (unlikely(!sta || 396 if (unlikely(!sta ||
374 ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && 397 ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT &&
375 (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP))) 398 (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP)))
376 return TXRX_CONTINUE; 399 return TX_CONTINUE;
377 400
378 if (unlikely((sta->flags & WLAN_STA_PS) && !sta->pspoll)) { 401 if (unlikely((sta->flags & WLAN_STA_PS) &&
402 !(sta->flags & WLAN_STA_PSPOLL))) {
379 struct ieee80211_tx_packet_data *pkt_data; 403 struct ieee80211_tx_packet_data *pkt_data;
380#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 404#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
381 printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries " 405 printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries "
@@ -383,7 +407,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_txrx_data *tx)
383 print_mac(mac, sta->addr), sta->aid, 407 print_mac(mac, sta->addr), sta->aid,
384 skb_queue_len(&sta->ps_tx_buf)); 408 skb_queue_len(&sta->ps_tx_buf));
385#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 409#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
386 sta->flags |= WLAN_STA_TIM;
387 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) 410 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
388 purge_old_ps_buffers(tx->local); 411 purge_old_ps_buffers(tx->local);
389 if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) { 412 if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) {
@@ -396,18 +419,15 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_txrx_data *tx)
396 dev_kfree_skb(old); 419 dev_kfree_skb(old);
397 } else 420 } else
398 tx->local->total_ps_buffered++; 421 tx->local->total_ps_buffered++;
422
399 /* Queue frame to be sent after STA sends an PS Poll frame */ 423 /* Queue frame to be sent after STA sends an PS Poll frame */
400 if (skb_queue_empty(&sta->ps_tx_buf)) { 424 if (skb_queue_empty(&sta->ps_tx_buf))
401 if (tx->local->ops->set_tim) 425 sta_info_set_tim_bit(sta);
402 tx->local->ops->set_tim(local_to_hw(tx->local), 426
403 sta->aid, 1);
404 if (tx->sdata->bss)
405 bss_tim_set(tx->local, tx->sdata->bss, sta->aid);
406 }
407 pkt_data = (struct ieee80211_tx_packet_data *)tx->skb->cb; 427 pkt_data = (struct ieee80211_tx_packet_data *)tx->skb->cb;
408 pkt_data->jiffies = jiffies; 428 pkt_data->jiffies = jiffies;
409 skb_queue_tail(&sta->ps_tx_buf, tx->skb); 429 skb_queue_tail(&sta->ps_tx_buf, tx->skb);
410 return TXRX_QUEUED; 430 return TX_QUEUED;
411 } 431 }
412#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 432#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
413 else if (unlikely(sta->flags & WLAN_STA_PS)) { 433 else if (unlikely(sta->flags & WLAN_STA_PS)) {
@@ -416,40 +436,40 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_txrx_data *tx)
416 print_mac(mac, sta->addr)); 436 print_mac(mac, sta->addr));
417 } 437 }
418#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 438#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
419 sta->pspoll = 0; 439 sta->flags &= ~WLAN_STA_PSPOLL;
420 440
421 return TXRX_CONTINUE; 441 return TX_CONTINUE;
422} 442}
423 443
424static ieee80211_txrx_result 444static ieee80211_tx_result
425ieee80211_tx_h_ps_buf(struct ieee80211_txrx_data *tx) 445ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
426{ 446{
427 if (unlikely(tx->flags & IEEE80211_TXRXD_TXPS_BUFFERED)) 447 if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
428 return TXRX_CONTINUE; 448 return TX_CONTINUE;
429 449
430 if (tx->flags & IEEE80211_TXRXD_TXUNICAST) 450 if (tx->flags & IEEE80211_TX_UNICAST)
431 return ieee80211_tx_h_unicast_ps_buf(tx); 451 return ieee80211_tx_h_unicast_ps_buf(tx);
432 else 452 else
433 return ieee80211_tx_h_multicast_ps_buf(tx); 453 return ieee80211_tx_h_multicast_ps_buf(tx);
434} 454}
435 455
436static ieee80211_txrx_result 456static ieee80211_tx_result
437ieee80211_tx_h_select_key(struct ieee80211_txrx_data *tx) 457ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
438{ 458{
439 struct ieee80211_key *key; 459 struct ieee80211_key *key;
440 u16 fc = tx->fc; 460 u16 fc = tx->fc;
441 461
442 if (unlikely(tx->u.tx.control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) 462 if (unlikely(tx->control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
443 tx->key = NULL; 463 tx->key = NULL;
444 else if (tx->sta && (key = rcu_dereference(tx->sta->key))) 464 else if (tx->sta && (key = rcu_dereference(tx->sta->key)))
445 tx->key = key; 465 tx->key = key;
446 else if ((key = rcu_dereference(tx->sdata->default_key))) 466 else if ((key = rcu_dereference(tx->sdata->default_key)))
447 tx->key = key; 467 tx->key = key;
448 else if (tx->sdata->drop_unencrypted && 468 else if (tx->sdata->drop_unencrypted &&
449 !(tx->u.tx.control->flags & IEEE80211_TXCTL_EAPOL_FRAME) && 469 !(tx->control->flags & IEEE80211_TXCTL_EAPOL_FRAME) &&
450 !(tx->flags & IEEE80211_TXRXD_TX_INJECTED)) { 470 !(tx->flags & IEEE80211_TX_INJECTED)) {
451 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); 471 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
452 return TXRX_DROP; 472 return TX_DROP;
453 } else 473 } else
454 tx->key = NULL; 474 tx->key = NULL;
455 475
@@ -476,13 +496,13 @@ ieee80211_tx_h_select_key(struct ieee80211_txrx_data *tx)
476 } 496 }
477 497
478 if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) 498 if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
479 tx->u.tx.control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; 499 tx->control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT;
480 500
481 return TXRX_CONTINUE; 501 return TX_CONTINUE;
482} 502}
483 503
484static ieee80211_txrx_result 504static ieee80211_tx_result
485ieee80211_tx_h_fragment(struct ieee80211_txrx_data *tx) 505ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
486{ 506{
487 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; 507 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data;
488 size_t hdrlen, per_fragm, num_fragm, payload_len, left; 508 size_t hdrlen, per_fragm, num_fragm, payload_len, left;
@@ -492,8 +512,8 @@ ieee80211_tx_h_fragment(struct ieee80211_txrx_data *tx)
492 u8 *pos; 512 u8 *pos;
493 int frag_threshold = tx->local->fragmentation_threshold; 513 int frag_threshold = tx->local->fragmentation_threshold;
494 514
495 if (!(tx->flags & IEEE80211_TXRXD_FRAGMENTED)) 515 if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
496 return TXRX_CONTINUE; 516 return TX_CONTINUE;
497 517
498 first = tx->skb; 518 first = tx->skb;
499 519
@@ -544,10 +564,10 @@ ieee80211_tx_h_fragment(struct ieee80211_txrx_data *tx)
544 } 564 }
545 skb_trim(first, hdrlen + per_fragm); 565 skb_trim(first, hdrlen + per_fragm);
546 566
547 tx->u.tx.num_extra_frag = num_fragm - 1; 567 tx->num_extra_frag = num_fragm - 1;
548 tx->u.tx.extra_frag = frags; 568 tx->extra_frag = frags;
549 569
550 return TXRX_CONTINUE; 570 return TX_CONTINUE;
551 571
552 fail: 572 fail:
553 printk(KERN_DEBUG "%s: failed to fragment frame\n", tx->dev->name); 573 printk(KERN_DEBUG "%s: failed to fragment frame\n", tx->dev->name);
@@ -558,14 +578,14 @@ ieee80211_tx_h_fragment(struct ieee80211_txrx_data *tx)
558 kfree(frags); 578 kfree(frags);
559 } 579 }
560 I802_DEBUG_INC(tx->local->tx_handlers_drop_fragment); 580 I802_DEBUG_INC(tx->local->tx_handlers_drop_fragment);
561 return TXRX_DROP; 581 return TX_DROP;
562} 582}
563 583
564static ieee80211_txrx_result 584static ieee80211_tx_result
565ieee80211_tx_h_encrypt(struct ieee80211_txrx_data *tx) 585ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
566{ 586{
567 if (!tx->key) 587 if (!tx->key)
568 return TXRX_CONTINUE; 588 return TX_CONTINUE;
569 589
570 switch (tx->key->conf.alg) { 590 switch (tx->key->conf.alg) {
571 case ALG_WEP: 591 case ALG_WEP:
@@ -578,59 +598,60 @@ ieee80211_tx_h_encrypt(struct ieee80211_txrx_data *tx)
578 598
579 /* not reached */ 599 /* not reached */
580 WARN_ON(1); 600 WARN_ON(1);
581 return TXRX_DROP; 601 return TX_DROP;
582} 602}
583 603
584static ieee80211_txrx_result 604static ieee80211_tx_result
585ieee80211_tx_h_rate_ctrl(struct ieee80211_txrx_data *tx) 605ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
586{ 606{
587 struct rate_selection rsel; 607 struct rate_selection rsel;
608 struct ieee80211_supported_band *sband;
609
610 sband = tx->local->hw.wiphy->bands[tx->local->hw.conf.channel->band];
588 611
589 if (likely(!tx->u.tx.rate)) { 612 if (likely(!tx->rate)) {
590 rate_control_get_rate(tx->dev, tx->u.tx.mode, tx->skb, &rsel); 613 rate_control_get_rate(tx->dev, sband, tx->skb, &rsel);
591 tx->u.tx.rate = rsel.rate; 614 tx->rate = rsel.rate;
592 if (unlikely(rsel.probe != NULL)) { 615 if (unlikely(rsel.probe)) {
593 tx->u.tx.control->flags |= 616 tx->control->flags |=
594 IEEE80211_TXCTL_RATE_CTRL_PROBE; 617 IEEE80211_TXCTL_RATE_CTRL_PROBE;
595 tx->flags |= IEEE80211_TXRXD_TXPROBE_LAST_FRAG; 618 tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG;
596 tx->u.tx.control->alt_retry_rate = tx->u.tx.rate->val; 619 tx->control->alt_retry_rate = tx->rate;
597 tx->u.tx.rate = rsel.probe; 620 tx->rate = rsel.probe;
598 } else 621 } else
599 tx->u.tx.control->alt_retry_rate = -1; 622 tx->control->alt_retry_rate = NULL;
600 623
601 if (!tx->u.tx.rate) 624 if (!tx->rate)
602 return TXRX_DROP; 625 return TX_DROP;
603 } else 626 } else
604 tx->u.tx.control->alt_retry_rate = -1; 627 tx->control->alt_retry_rate = NULL;
605 628
606 if (tx->u.tx.mode->mode == MODE_IEEE80211G && 629 if (tx->sdata->bss_conf.use_cts_prot &&
607 tx->sdata->bss_conf.use_cts_prot && 630 (tx->flags & IEEE80211_TX_FRAGMENTED) && rsel.nonerp) {
608 (tx->flags & IEEE80211_TXRXD_FRAGMENTED) && rsel.nonerp) { 631 tx->last_frag_rate = tx->rate;
609 tx->u.tx.last_frag_rate = tx->u.tx.rate;
610 if (rsel.probe) 632 if (rsel.probe)
611 tx->flags &= ~IEEE80211_TXRXD_TXPROBE_LAST_FRAG; 633 tx->flags &= ~IEEE80211_TX_PROBE_LAST_FRAG;
612 else 634 else
613 tx->flags |= IEEE80211_TXRXD_TXPROBE_LAST_FRAG; 635 tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG;
614 tx->u.tx.rate = rsel.nonerp; 636 tx->rate = rsel.nonerp;
615 tx->u.tx.control->rate = rsel.nonerp; 637 tx->control->tx_rate = rsel.nonerp;
616 tx->u.tx.control->flags &= ~IEEE80211_TXCTL_RATE_CTRL_PROBE; 638 tx->control->flags &= ~IEEE80211_TXCTL_RATE_CTRL_PROBE;
617 } else { 639 } else {
618 tx->u.tx.last_frag_rate = tx->u.tx.rate; 640 tx->last_frag_rate = tx->rate;
619 tx->u.tx.control->rate = tx->u.tx.rate; 641 tx->control->tx_rate = tx->rate;
620 } 642 }
621 tx->u.tx.control->tx_rate = tx->u.tx.rate->val; 643 tx->control->tx_rate = tx->rate;
622 644
623 return TXRX_CONTINUE; 645 return TX_CONTINUE;
624} 646}
625 647
626static ieee80211_txrx_result 648static ieee80211_tx_result
627ieee80211_tx_h_misc(struct ieee80211_txrx_data *tx) 649ieee80211_tx_h_misc(struct ieee80211_tx_data *tx)
628{ 650{
629 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; 651 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data;
630 u16 fc = le16_to_cpu(hdr->frame_control); 652 u16 fc = le16_to_cpu(hdr->frame_control);
631 u16 dur; 653 u16 dur;
632 struct ieee80211_tx_control *control = tx->u.tx.control; 654 struct ieee80211_tx_control *control = tx->control;
633 struct ieee80211_hw_mode *mode = tx->u.tx.mode;
634 655
635 if (!control->retry_limit) { 656 if (!control->retry_limit) {
636 if (!is_multicast_ether_addr(hdr->addr1)) { 657 if (!is_multicast_ether_addr(hdr->addr1)) {
@@ -652,20 +673,20 @@ ieee80211_tx_h_misc(struct ieee80211_txrx_data *tx)
652 } 673 }
653 } 674 }
654 675
655 if (tx->flags & IEEE80211_TXRXD_FRAGMENTED) { 676 if (tx->flags & IEEE80211_TX_FRAGMENTED) {
656 /* Do not use multiple retry rates when sending fragmented 677 /* Do not use multiple retry rates when sending fragmented
657 * frames. 678 * frames.
658 * TODO: The last fragment could still use multiple retry 679 * TODO: The last fragment could still use multiple retry
659 * rates. */ 680 * rates. */
660 control->alt_retry_rate = -1; 681 control->alt_retry_rate = NULL;
661 } 682 }
662 683
663 /* Use CTS protection for unicast frames sent using extended rates if 684 /* Use CTS protection for unicast frames sent using extended rates if
664 * there are associated non-ERP stations and RTS/CTS is not configured 685 * there are associated non-ERP stations and RTS/CTS is not configured
665 * for the frame. */ 686 * for the frame. */
666 if (mode->mode == MODE_IEEE80211G && 687 if ((tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) &&
667 (tx->u.tx.rate->flags & IEEE80211_RATE_ERP) && 688 (tx->rate->flags & IEEE80211_RATE_ERP_G) &&
668 (tx->flags & IEEE80211_TXRXD_TXUNICAST) && 689 (tx->flags & IEEE80211_TX_UNICAST) &&
669 tx->sdata->bss_conf.use_cts_prot && 690 tx->sdata->bss_conf.use_cts_prot &&
670 !(control->flags & IEEE80211_TXCTL_USE_RTS_CTS)) 691 !(control->flags & IEEE80211_TXCTL_USE_RTS_CTS))
671 control->flags |= IEEE80211_TXCTL_USE_CTS_PROTECT; 692 control->flags |= IEEE80211_TXCTL_USE_CTS_PROTECT;
@@ -674,62 +695,77 @@ ieee80211_tx_h_misc(struct ieee80211_txrx_data *tx)
674 * short preambles at the selected rate and short preambles are 695 * short preambles at the selected rate and short preambles are
675 * available on the network at the current point in time. */ 696 * available on the network at the current point in time. */
676 if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && 697 if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
677 (tx->u.tx.rate->flags & IEEE80211_RATE_PREAMBLE2) && 698 (tx->rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) &&
678 tx->sdata->bss_conf.use_short_preamble && 699 tx->sdata->bss_conf.use_short_preamble &&
679 (!tx->sta || (tx->sta->flags & WLAN_STA_SHORT_PREAMBLE))) { 700 (!tx->sta || (tx->sta->flags & WLAN_STA_SHORT_PREAMBLE))) {
680 tx->u.tx.control->tx_rate = tx->u.tx.rate->val2; 701 tx->control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE;
681 } 702 }
682 703
683 /* Setup duration field for the first fragment of the frame. Duration 704 /* Setup duration field for the first fragment of the frame. Duration
684 * for remaining fragments will be updated when they are being sent 705 * for remaining fragments will be updated when they are being sent
685 * to low-level driver in ieee80211_tx(). */ 706 * to low-level driver in ieee80211_tx(). */
686 dur = ieee80211_duration(tx, is_multicast_ether_addr(hdr->addr1), 707 dur = ieee80211_duration(tx, is_multicast_ether_addr(hdr->addr1),
687 (tx->flags & IEEE80211_TXRXD_FRAGMENTED) ? 708 (tx->flags & IEEE80211_TX_FRAGMENTED) ?
688 tx->u.tx.extra_frag[0]->len : 0); 709 tx->extra_frag[0]->len : 0);
689 hdr->duration_id = cpu_to_le16(dur); 710 hdr->duration_id = cpu_to_le16(dur);
690 711
691 if ((control->flags & IEEE80211_TXCTL_USE_RTS_CTS) || 712 if ((control->flags & IEEE80211_TXCTL_USE_RTS_CTS) ||
692 (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) { 713 (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) {
693 struct ieee80211_rate *rate; 714 struct ieee80211_supported_band *sband;
715 struct ieee80211_rate *rate, *baserate;
716 int idx;
717
718 sband = tx->local->hw.wiphy->bands[
719 tx->local->hw.conf.channel->band];
694 720
695 /* Do not use multiple retry rates when using RTS/CTS */ 721 /* Do not use multiple retry rates when using RTS/CTS */
696 control->alt_retry_rate = -1; 722 control->alt_retry_rate = NULL;
697 723
698 /* Use min(data rate, max base rate) as CTS/RTS rate */ 724 /* Use min(data rate, max base rate) as CTS/RTS rate */
699 rate = tx->u.tx.rate; 725 rate = tx->rate;
700 while (rate > mode->rates && 726 baserate = NULL;
701 !(rate->flags & IEEE80211_RATE_BASIC)) 727
702 rate--; 728 for (idx = 0; idx < sband->n_bitrates; idx++) {
729 if (sband->bitrates[idx].bitrate > rate->bitrate)
730 continue;
731 if (tx->sdata->basic_rates & BIT(idx) &&
732 (!baserate ||
733 (baserate->bitrate < sband->bitrates[idx].bitrate)))
734 baserate = &sband->bitrates[idx];
735 }
703 736
704 control->rts_cts_rate = rate->val; 737 if (baserate)
705 control->rts_rate = rate; 738 control->rts_cts_rate = baserate;
739 else
740 control->rts_cts_rate = &sband->bitrates[0];
706 } 741 }
707 742
708 if (tx->sta) { 743 if (tx->sta) {
744 control->aid = tx->sta->aid;
709 tx->sta->tx_packets++; 745 tx->sta->tx_packets++;
710 tx->sta->tx_fragments++; 746 tx->sta->tx_fragments++;
711 tx->sta->tx_bytes += tx->skb->len; 747 tx->sta->tx_bytes += tx->skb->len;
712 if (tx->u.tx.extra_frag) { 748 if (tx->extra_frag) {
713 int i; 749 int i;
714 tx->sta->tx_fragments += tx->u.tx.num_extra_frag; 750 tx->sta->tx_fragments += tx->num_extra_frag;
715 for (i = 0; i < tx->u.tx.num_extra_frag; i++) { 751 for (i = 0; i < tx->num_extra_frag; i++) {
716 tx->sta->tx_bytes += 752 tx->sta->tx_bytes +=
717 tx->u.tx.extra_frag[i]->len; 753 tx->extra_frag[i]->len;
718 } 754 }
719 } 755 }
720 } 756 }
721 757
722 return TXRX_CONTINUE; 758 return TX_CONTINUE;
723} 759}
724 760
725static ieee80211_txrx_result 761static ieee80211_tx_result
726ieee80211_tx_h_load_stats(struct ieee80211_txrx_data *tx) 762ieee80211_tx_h_load_stats(struct ieee80211_tx_data *tx)
727{ 763{
728 struct ieee80211_local *local = tx->local; 764 struct ieee80211_local *local = tx->local;
729 struct ieee80211_hw_mode *mode = tx->u.tx.mode;
730 struct sk_buff *skb = tx->skb; 765 struct sk_buff *skb = tx->skb;
731 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 766 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
732 u32 load = 0, hdrtime; 767 u32 load = 0, hdrtime;
768 struct ieee80211_rate *rate = tx->rate;
733 769
734 /* TODO: this could be part of tx_status handling, so that the number 770 /* TODO: this could be part of tx_status handling, so that the number
735 * of retries would be known; TX rate should in that case be stored 771 * of retries would be known; TX rate should in that case be stored
@@ -740,9 +776,9 @@ ieee80211_tx_h_load_stats(struct ieee80211_txrx_data *tx)
740 /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values, 776 /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values,
741 * 1 usec = 1/8 * (1080 / 10) = 13.5 */ 777 * 1 usec = 1/8 * (1080 / 10) = 13.5 */
742 778
743 if (mode->mode == MODE_IEEE80211A || 779 if (tx->channel->band == IEEE80211_BAND_5GHZ ||
744 (mode->mode == MODE_IEEE80211G && 780 (tx->channel->band == IEEE80211_BAND_2GHZ &&
745 tx->u.tx.rate->flags & IEEE80211_RATE_ERP)) 781 rate->flags & IEEE80211_RATE_ERP_G))
746 hdrtime = CHAN_UTIL_HDR_SHORT; 782 hdrtime = CHAN_UTIL_HDR_SHORT;
747 else 783 else
748 hdrtime = CHAN_UTIL_HDR_LONG; 784 hdrtime = CHAN_UTIL_HDR_LONG;
@@ -751,19 +787,20 @@ ieee80211_tx_h_load_stats(struct ieee80211_txrx_data *tx)
751 if (!is_multicast_ether_addr(hdr->addr1)) 787 if (!is_multicast_ether_addr(hdr->addr1))
752 load += hdrtime; 788 load += hdrtime;
753 789
754 if (tx->u.tx.control->flags & IEEE80211_TXCTL_USE_RTS_CTS) 790 if (tx->control->flags & IEEE80211_TXCTL_USE_RTS_CTS)
755 load += 2 * hdrtime; 791 load += 2 * hdrtime;
756 else if (tx->u.tx.control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) 792 else if (tx->control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)
757 load += hdrtime; 793 load += hdrtime;
758 794
759 load += skb->len * tx->u.tx.rate->rate_inv; 795 /* TODO: optimise again */
796 load += skb->len * CHAN_UTIL_RATE_LCM / rate->bitrate;
760 797
761 if (tx->u.tx.extra_frag) { 798 if (tx->extra_frag) {
762 int i; 799 int i;
763 for (i = 0; i < tx->u.tx.num_extra_frag; i++) { 800 for (i = 0; i < tx->num_extra_frag; i++) {
764 load += 2 * hdrtime; 801 load += 2 * hdrtime;
765 load += tx->u.tx.extra_frag[i]->len * 802 load += tx->extra_frag[i]->len *
766 tx->u.tx.rate->rate; 803 tx->rate->bitrate;
767 } 804 }
768 } 805 }
769 806
@@ -774,13 +811,12 @@ ieee80211_tx_h_load_stats(struct ieee80211_txrx_data *tx)
774 tx->sta->channel_use_raw += load; 811 tx->sta->channel_use_raw += load;
775 tx->sdata->channel_use_raw += load; 812 tx->sdata->channel_use_raw += load;
776 813
777 return TXRX_CONTINUE; 814 return TX_CONTINUE;
778} 815}
779 816
780/* TODO: implement register/unregister functions for adding TX/RX handlers
781 * into ordered list */
782 817
783ieee80211_tx_handler ieee80211_tx_handlers[] = 818typedef ieee80211_tx_result (*ieee80211_tx_handler)(struct ieee80211_tx_data *);
819static ieee80211_tx_handler ieee80211_tx_handlers[] =
784{ 820{
785 ieee80211_tx_h_check_assoc, 821 ieee80211_tx_h_check_assoc,
786 ieee80211_tx_h_sequence, 822 ieee80211_tx_h_sequence,
@@ -801,8 +837,8 @@ ieee80211_tx_handler ieee80211_tx_handlers[] =
801 * deal with packet injection down monitor interface 837 * deal with packet injection down monitor interface
802 * with Radiotap Header -- only called for monitor mode interface 838 * with Radiotap Header -- only called for monitor mode interface
803 */ 839 */
804static ieee80211_txrx_result 840static ieee80211_tx_result
805__ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx, 841__ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
806 struct sk_buff *skb) 842 struct sk_buff *skb)
807{ 843{
808 /* 844 /*
@@ -816,13 +852,15 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx,
816 struct ieee80211_radiotap_iterator iterator; 852 struct ieee80211_radiotap_iterator iterator;
817 struct ieee80211_radiotap_header *rthdr = 853 struct ieee80211_radiotap_header *rthdr =
818 (struct ieee80211_radiotap_header *) skb->data; 854 (struct ieee80211_radiotap_header *) skb->data;
819 struct ieee80211_hw_mode *mode = tx->local->hw.conf.mode; 855 struct ieee80211_supported_band *sband;
820 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len); 856 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len);
821 struct ieee80211_tx_control *control = tx->u.tx.control; 857 struct ieee80211_tx_control *control = tx->control;
858
859 sband = tx->local->hw.wiphy->bands[tx->local->hw.conf.channel->band];
822 860
823 control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; 861 control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT;
824 tx->flags |= IEEE80211_TXRXD_TX_INJECTED; 862 tx->flags |= IEEE80211_TX_INJECTED;
825 tx->flags &= ~IEEE80211_TXRXD_FRAGMENTED; 863 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
826 864
827 /* 865 /*
828 * for every radiotap entry that is present 866 * for every radiotap entry that is present
@@ -852,11 +890,13 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx,
852 * ieee80211 rate int is in 100kbps units eg, 0x0a=1Mbps 890 * ieee80211 rate int is in 100kbps units eg, 0x0a=1Mbps
853 */ 891 */
854 target_rate = (*iterator.this_arg) * 5; 892 target_rate = (*iterator.this_arg) * 5;
855 for (i = 0; i < mode->num_rates; i++) { 893 for (i = 0; i < sband->n_bitrates; i++) {
856 struct ieee80211_rate *r = &mode->rates[i]; 894 struct ieee80211_rate *r;
895
896 r = &sband->bitrates[i];
857 897
858 if (r->rate == target_rate) { 898 if (r->bitrate == target_rate) {
859 tx->u.tx.rate = r; 899 tx->rate = r;
860 break; 900 break;
861 } 901 }
862 } 902 }
@@ -870,9 +910,11 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx,
870 control->antenna_sel_tx = (*iterator.this_arg) + 1; 910 control->antenna_sel_tx = (*iterator.this_arg) + 1;
871 break; 911 break;
872 912
913#if 0
873 case IEEE80211_RADIOTAP_DBM_TX_POWER: 914 case IEEE80211_RADIOTAP_DBM_TX_POWER:
874 control->power_level = *iterator.this_arg; 915 control->power_level = *iterator.this_arg;
875 break; 916 break;
917#endif
876 918
877 case IEEE80211_RADIOTAP_FLAGS: 919 case IEEE80211_RADIOTAP_FLAGS:
878 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) { 920 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
@@ -884,7 +926,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx,
884 * on transmission 926 * on transmission
885 */ 927 */
886 if (skb->len < (iterator.max_length + FCS_LEN)) 928 if (skb->len < (iterator.max_length + FCS_LEN))
887 return TXRX_DROP; 929 return TX_DROP;
888 930
889 skb_trim(skb, skb->len - FCS_LEN); 931 skb_trim(skb, skb->len - FCS_LEN);
890 } 932 }
@@ -892,7 +934,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx,
892 control->flags &= 934 control->flags &=
893 ~IEEE80211_TXCTL_DO_NOT_ENCRYPT; 935 ~IEEE80211_TXCTL_DO_NOT_ENCRYPT;
894 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) 936 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
895 tx->flags |= IEEE80211_TXRXD_FRAGMENTED; 937 tx->flags |= IEEE80211_TX_FRAGMENTED;
896 break; 938 break;
897 939
898 /* 940 /*
@@ -907,7 +949,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx,
907 } 949 }
908 950
909 if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */ 951 if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
910 return TXRX_DROP; 952 return TX_DROP;
911 953
912 /* 954 /*
913 * remove the radiotap header 955 * remove the radiotap header
@@ -916,14 +958,14 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx,
916 */ 958 */
917 skb_pull(skb, iterator.max_length); 959 skb_pull(skb, iterator.max_length);
918 960
919 return TXRX_CONTINUE; 961 return TX_CONTINUE;
920} 962}
921 963
922/* 964/*
923 * initialises @tx 965 * initialises @tx
924 */ 966 */
925static ieee80211_txrx_result 967static ieee80211_tx_result
926__ieee80211_tx_prepare(struct ieee80211_txrx_data *tx, 968__ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
927 struct sk_buff *skb, 969 struct sk_buff *skb,
928 struct net_device *dev, 970 struct net_device *dev,
929 struct ieee80211_tx_control *control) 971 struct ieee80211_tx_control *control)
@@ -939,18 +981,18 @@ __ieee80211_tx_prepare(struct ieee80211_txrx_data *tx,
939 tx->dev = dev; /* use original interface */ 981 tx->dev = dev; /* use original interface */
940 tx->local = local; 982 tx->local = local;
941 tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev); 983 tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev);
942 tx->u.tx.control = control; 984 tx->control = control;
943 /* 985 /*
944 * Set this flag (used below to indicate "automatic fragmentation"), 986 * Set this flag (used below to indicate "automatic fragmentation"),
945 * it will be cleared/left by radiotap as desired. 987 * it will be cleared/left by radiotap as desired.
946 */ 988 */
947 tx->flags |= IEEE80211_TXRXD_FRAGMENTED; 989 tx->flags |= IEEE80211_TX_FRAGMENTED;
948 990
949 /* process and remove the injection radiotap header */ 991 /* process and remove the injection radiotap header */
950 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 992 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
951 if (unlikely(sdata->vif.type == IEEE80211_IF_TYPE_MNTR)) { 993 if (unlikely(sdata->vif.type == IEEE80211_IF_TYPE_MNTR)) {
952 if (__ieee80211_parse_tx_radiotap(tx, skb) == TXRX_DROP) 994 if (__ieee80211_parse_tx_radiotap(tx, skb) == TX_DROP)
953 return TXRX_DROP; 995 return TX_DROP;
954 996
955 /* 997 /*
956 * __ieee80211_parse_tx_radiotap has now removed 998 * __ieee80211_parse_tx_radiotap has now removed
@@ -965,27 +1007,27 @@ __ieee80211_tx_prepare(struct ieee80211_txrx_data *tx,
965 tx->fc = le16_to_cpu(hdr->frame_control); 1007 tx->fc = le16_to_cpu(hdr->frame_control);
966 1008
967 if (is_multicast_ether_addr(hdr->addr1)) { 1009 if (is_multicast_ether_addr(hdr->addr1)) {
968 tx->flags &= ~IEEE80211_TXRXD_TXUNICAST; 1010 tx->flags &= ~IEEE80211_TX_UNICAST;
969 control->flags |= IEEE80211_TXCTL_NO_ACK; 1011 control->flags |= IEEE80211_TXCTL_NO_ACK;
970 } else { 1012 } else {
971 tx->flags |= IEEE80211_TXRXD_TXUNICAST; 1013 tx->flags |= IEEE80211_TX_UNICAST;
972 control->flags &= ~IEEE80211_TXCTL_NO_ACK; 1014 control->flags &= ~IEEE80211_TXCTL_NO_ACK;
973 } 1015 }
974 1016
975 if (tx->flags & IEEE80211_TXRXD_FRAGMENTED) { 1017 if (tx->flags & IEEE80211_TX_FRAGMENTED) {
976 if ((tx->flags & IEEE80211_TXRXD_TXUNICAST) && 1018 if ((tx->flags & IEEE80211_TX_UNICAST) &&
977 skb->len + FCS_LEN > local->fragmentation_threshold && 1019 skb->len + FCS_LEN > local->fragmentation_threshold &&
978 !local->ops->set_frag_threshold) 1020 !local->ops->set_frag_threshold)
979 tx->flags |= IEEE80211_TXRXD_FRAGMENTED; 1021 tx->flags |= IEEE80211_TX_FRAGMENTED;
980 else 1022 else
981 tx->flags &= ~IEEE80211_TXRXD_FRAGMENTED; 1023 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
982 } 1024 }
983 1025
984 if (!tx->sta) 1026 if (!tx->sta)
985 control->flags |= IEEE80211_TXCTL_CLEAR_DST_MASK; 1027 control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT;
986 else if (tx->sta->clear_dst_mask) { 1028 else if (tx->sta->flags & WLAN_STA_CLEAR_PS_FILT) {
987 control->flags |= IEEE80211_TXCTL_CLEAR_DST_MASK; 1029 control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT;
988 tx->sta->clear_dst_mask = 0; 1030 tx->sta->flags &= ~WLAN_STA_CLEAR_PS_FILT;
989 } 1031 }
990 1032
991 hdrlen = ieee80211_get_hdrlen(tx->fc); 1033 hdrlen = ieee80211_get_hdrlen(tx->fc);
@@ -995,13 +1037,13 @@ __ieee80211_tx_prepare(struct ieee80211_txrx_data *tx,
995 } 1037 }
996 control->flags |= IEEE80211_TXCTL_FIRST_FRAGMENT; 1038 control->flags |= IEEE80211_TXCTL_FIRST_FRAGMENT;
997 1039
998 return TXRX_CONTINUE; 1040 return TX_CONTINUE;
999} 1041}
1000 1042
1001/* 1043/*
1002 * NB: @tx is uninitialised when passed in here 1044 * NB: @tx is uninitialised when passed in here
1003 */ 1045 */
1004static int ieee80211_tx_prepare(struct ieee80211_txrx_data *tx, 1046static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1005 struct sk_buff *skb, 1047 struct sk_buff *skb,
1006 struct net_device *mdev, 1048 struct net_device *mdev,
1007 struct ieee80211_tx_control *control) 1049 struct ieee80211_tx_control *control)
@@ -1024,9 +1066,9 @@ static int ieee80211_tx_prepare(struct ieee80211_txrx_data *tx,
1024} 1066}
1025 1067
1026static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, 1068static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
1027 struct ieee80211_txrx_data *tx) 1069 struct ieee80211_tx_data *tx)
1028{ 1070{
1029 struct ieee80211_tx_control *control = tx->u.tx.control; 1071 struct ieee80211_tx_control *control = tx->control;
1030 int ret, i; 1072 int ret, i;
1031 1073
1032 if (!ieee80211_qdisc_installed(local->mdev) && 1074 if (!ieee80211_qdisc_installed(local->mdev) &&
@@ -1043,20 +1085,20 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
1043 local->mdev->trans_start = jiffies; 1085 local->mdev->trans_start = jiffies;
1044 ieee80211_led_tx(local, 1); 1086 ieee80211_led_tx(local, 1);
1045 } 1087 }
1046 if (tx->u.tx.extra_frag) { 1088 if (tx->extra_frag) {
1047 control->flags &= ~(IEEE80211_TXCTL_USE_RTS_CTS | 1089 control->flags &= ~(IEEE80211_TXCTL_USE_RTS_CTS |
1048 IEEE80211_TXCTL_USE_CTS_PROTECT | 1090 IEEE80211_TXCTL_USE_CTS_PROTECT |
1049 IEEE80211_TXCTL_CLEAR_DST_MASK | 1091 IEEE80211_TXCTL_CLEAR_PS_FILT |
1050 IEEE80211_TXCTL_FIRST_FRAGMENT); 1092 IEEE80211_TXCTL_FIRST_FRAGMENT);
1051 for (i = 0; i < tx->u.tx.num_extra_frag; i++) { 1093 for (i = 0; i < tx->num_extra_frag; i++) {
1052 if (!tx->u.tx.extra_frag[i]) 1094 if (!tx->extra_frag[i])
1053 continue; 1095 continue;
1054 if (__ieee80211_queue_stopped(local, control->queue)) 1096 if (__ieee80211_queue_stopped(local, control->queue))
1055 return IEEE80211_TX_FRAG_AGAIN; 1097 return IEEE80211_TX_FRAG_AGAIN;
1056 if (i == tx->u.tx.num_extra_frag) { 1098 if (i == tx->num_extra_frag) {
1057 control->tx_rate = tx->u.tx.last_frag_hwrate; 1099 control->tx_rate = tx->last_frag_rate;
1058 control->rate = tx->u.tx.last_frag_rate; 1100
1059 if (tx->flags & IEEE80211_TXRXD_TXPROBE_LAST_FRAG) 1101 if (tx->flags & IEEE80211_TX_PROBE_LAST_FRAG)
1060 control->flags |= 1102 control->flags |=
1061 IEEE80211_TXCTL_RATE_CTRL_PROBE; 1103 IEEE80211_TXCTL_RATE_CTRL_PROBE;
1062 else 1104 else
@@ -1066,18 +1108,18 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
1066 1108
1067 ieee80211_dump_frame(wiphy_name(local->hw.wiphy), 1109 ieee80211_dump_frame(wiphy_name(local->hw.wiphy),
1068 "TX to low-level driver", 1110 "TX to low-level driver",
1069 tx->u.tx.extra_frag[i]); 1111 tx->extra_frag[i]);
1070 ret = local->ops->tx(local_to_hw(local), 1112 ret = local->ops->tx(local_to_hw(local),
1071 tx->u.tx.extra_frag[i], 1113 tx->extra_frag[i],
1072 control); 1114 control);
1073 if (ret) 1115 if (ret)
1074 return IEEE80211_TX_FRAG_AGAIN; 1116 return IEEE80211_TX_FRAG_AGAIN;
1075 local->mdev->trans_start = jiffies; 1117 local->mdev->trans_start = jiffies;
1076 ieee80211_led_tx(local, 1); 1118 ieee80211_led_tx(local, 1);
1077 tx->u.tx.extra_frag[i] = NULL; 1119 tx->extra_frag[i] = NULL;
1078 } 1120 }
1079 kfree(tx->u.tx.extra_frag); 1121 kfree(tx->extra_frag);
1080 tx->u.tx.extra_frag = NULL; 1122 tx->extra_frag = NULL;
1081 } 1123 }
1082 return IEEE80211_TX_OK; 1124 return IEEE80211_TX_OK;
1083} 1125}
@@ -1088,8 +1130,8 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1088 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1130 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1089 struct sta_info *sta; 1131 struct sta_info *sta;
1090 ieee80211_tx_handler *handler; 1132 ieee80211_tx_handler *handler;
1091 struct ieee80211_txrx_data tx; 1133 struct ieee80211_tx_data tx;
1092 ieee80211_txrx_result res = TXRX_DROP, res_prepare; 1134 ieee80211_tx_result res = TX_DROP, res_prepare;
1093 int ret, i; 1135 int ret, i;
1094 1136
1095 WARN_ON(__ieee80211_queue_pending(local, control->queue)); 1137 WARN_ON(__ieee80211_queue_pending(local, control->queue));
@@ -1099,59 +1141,52 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1099 return 0; 1141 return 0;
1100 } 1142 }
1101 1143
1144 rcu_read_lock();
1145
1102 /* initialises tx */ 1146 /* initialises tx */
1103 res_prepare = __ieee80211_tx_prepare(&tx, skb, dev, control); 1147 res_prepare = __ieee80211_tx_prepare(&tx, skb, dev, control);
1104 1148
1105 if (res_prepare == TXRX_DROP) { 1149 if (res_prepare == TX_DROP) {
1106 dev_kfree_skb(skb); 1150 dev_kfree_skb(skb);
1151 rcu_read_unlock();
1107 return 0; 1152 return 0;
1108 } 1153 }
1109 1154
1110 /*
1111 * key references are protected using RCU and this requires that
1112 * we are in a read-site RCU section during receive processing
1113 */
1114 rcu_read_lock();
1115
1116 sta = tx.sta; 1155 sta = tx.sta;
1117 tx.u.tx.mode = local->hw.conf.mode; 1156 tx.channel = local->hw.conf.channel;
1118 1157
1119 for (handler = local->tx_handlers; *handler != NULL; 1158 for (handler = ieee80211_tx_handlers; *handler != NULL;
1120 handler++) { 1159 handler++) {
1121 res = (*handler)(&tx); 1160 res = (*handler)(&tx);
1122 if (res != TXRX_CONTINUE) 1161 if (res != TX_CONTINUE)
1123 break; 1162 break;
1124 } 1163 }
1125 1164
1126 skb = tx.skb; /* handlers are allowed to change skb */ 1165 skb = tx.skb; /* handlers are allowed to change skb */
1127 1166
1128 if (sta) 1167 if (unlikely(res == TX_DROP)) {
1129 sta_info_put(sta);
1130
1131 if (unlikely(res == TXRX_DROP)) {
1132 I802_DEBUG_INC(local->tx_handlers_drop); 1168 I802_DEBUG_INC(local->tx_handlers_drop);
1133 goto drop; 1169 goto drop;
1134 } 1170 }
1135 1171
1136 if (unlikely(res == TXRX_QUEUED)) { 1172 if (unlikely(res == TX_QUEUED)) {
1137 I802_DEBUG_INC(local->tx_handlers_queued); 1173 I802_DEBUG_INC(local->tx_handlers_queued);
1138 rcu_read_unlock(); 1174 rcu_read_unlock();
1139 return 0; 1175 return 0;
1140 } 1176 }
1141 1177
1142 if (tx.u.tx.extra_frag) { 1178 if (tx.extra_frag) {
1143 for (i = 0; i < tx.u.tx.num_extra_frag; i++) { 1179 for (i = 0; i < tx.num_extra_frag; i++) {
1144 int next_len, dur; 1180 int next_len, dur;
1145 struct ieee80211_hdr *hdr = 1181 struct ieee80211_hdr *hdr =
1146 (struct ieee80211_hdr *) 1182 (struct ieee80211_hdr *)
1147 tx.u.tx.extra_frag[i]->data; 1183 tx.extra_frag[i]->data;
1148 1184
1149 if (i + 1 < tx.u.tx.num_extra_frag) { 1185 if (i + 1 < tx.num_extra_frag) {
1150 next_len = tx.u.tx.extra_frag[i + 1]->len; 1186 next_len = tx.extra_frag[i + 1]->len;
1151 } else { 1187 } else {
1152 next_len = 0; 1188 next_len = 0;
1153 tx.u.tx.rate = tx.u.tx.last_frag_rate; 1189 tx.rate = tx.last_frag_rate;
1154 tx.u.tx.last_frag_hwrate = tx.u.tx.rate->val;
1155 } 1190 }
1156 dur = ieee80211_duration(&tx, 0, next_len); 1191 dur = ieee80211_duration(&tx, 0, next_len);
1157 hdr->duration_id = cpu_to_le16(dur); 1192 hdr->duration_id = cpu_to_le16(dur);
@@ -1186,12 +1221,11 @@ retry:
1186 memcpy(&store->control, control, 1221 memcpy(&store->control, control,
1187 sizeof(struct ieee80211_tx_control)); 1222 sizeof(struct ieee80211_tx_control));
1188 store->skb = skb; 1223 store->skb = skb;
1189 store->extra_frag = tx.u.tx.extra_frag; 1224 store->extra_frag = tx.extra_frag;
1190 store->num_extra_frag = tx.u.tx.num_extra_frag; 1225 store->num_extra_frag = tx.num_extra_frag;
1191 store->last_frag_hwrate = tx.u.tx.last_frag_hwrate; 1226 store->last_frag_rate = tx.last_frag_rate;
1192 store->last_frag_rate = tx.u.tx.last_frag_rate;
1193 store->last_frag_rate_ctrl_probe = 1227 store->last_frag_rate_ctrl_probe =
1194 !!(tx.flags & IEEE80211_TXRXD_TXPROBE_LAST_FRAG); 1228 !!(tx.flags & IEEE80211_TX_PROBE_LAST_FRAG);
1195 } 1229 }
1196 rcu_read_unlock(); 1230 rcu_read_unlock();
1197 return 0; 1231 return 0;
@@ -1199,10 +1233,10 @@ retry:
1199 drop: 1233 drop:
1200 if (skb) 1234 if (skb)
1201 dev_kfree_skb(skb); 1235 dev_kfree_skb(skb);
1202 for (i = 0; i < tx.u.tx.num_extra_frag; i++) 1236 for (i = 0; i < tx.num_extra_frag; i++)
1203 if (tx.u.tx.extra_frag[i]) 1237 if (tx.extra_frag[i])
1204 dev_kfree_skb(tx.u.tx.extra_frag[i]); 1238 dev_kfree_skb(tx.extra_frag[i]);
1205 kfree(tx.u.tx.extra_frag); 1239 kfree(tx.extra_frag);
1206 rcu_read_unlock(); 1240 rcu_read_unlock();
1207 return 0; 1241 return 0;
1208} 1242}
@@ -1260,6 +1294,8 @@ int ieee80211_master_start_xmit(struct sk_buff *skb,
1260 control.flags |= IEEE80211_TXCTL_REQUEUE; 1294 control.flags |= IEEE80211_TXCTL_REQUEUE;
1261 if (pkt_data->flags & IEEE80211_TXPD_EAPOL_FRAME) 1295 if (pkt_data->flags & IEEE80211_TXPD_EAPOL_FRAME)
1262 control.flags |= IEEE80211_TXCTL_EAPOL_FRAME; 1296 control.flags |= IEEE80211_TXCTL_EAPOL_FRAME;
1297 if (pkt_data->flags & IEEE80211_TXPD_AMPDU)
1298 control.flags |= IEEE80211_TXCTL_AMPDU;
1263 control.queue = pkt_data->queue; 1299 control.queue = pkt_data->queue;
1264 1300
1265 ret = ieee80211_tx(odev, skb, &control); 1301 ret = ieee80211_tx(odev, skb, &control);
@@ -1346,8 +1382,9 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1346 struct ieee80211_tx_packet_data *pkt_data; 1382 struct ieee80211_tx_packet_data *pkt_data;
1347 struct ieee80211_sub_if_data *sdata; 1383 struct ieee80211_sub_if_data *sdata;
1348 int ret = 1, head_need; 1384 int ret = 1, head_need;
1349 u16 ethertype, hdrlen, fc; 1385 u16 ethertype, hdrlen, meshhdrlen = 0, fc;
1350 struct ieee80211_hdr hdr; 1386 struct ieee80211_hdr hdr;
1387 struct ieee80211s_hdr mesh_hdr;
1351 const u8 *encaps_data; 1388 const u8 *encaps_data;
1352 int encaps_len, skip_header_bytes; 1389 int encaps_len, skip_header_bytes;
1353 int nh_pos, h_pos; 1390 int nh_pos, h_pos;
@@ -1389,6 +1426,37 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1389 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 1426 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1390 hdrlen = 30; 1427 hdrlen = 30;
1391 break; 1428 break;
1429#ifdef CONFIG_MAC80211_MESH
1430 case IEEE80211_IF_TYPE_MESH_POINT:
1431 fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS;
1432 /* RA TA DA SA */
1433 if (is_multicast_ether_addr(skb->data))
1434 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1435 else if (mesh_nexthop_lookup(hdr.addr1, skb, dev))
1436 return 0;
1437 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
1438 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1439 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1440 if (skb->pkt_type == PACKET_OTHERHOST) {
1441 /* Forwarded frame, keep mesh ttl and seqnum */
1442 struct ieee80211s_hdr *prev_meshhdr;
1443 prev_meshhdr = ((struct ieee80211s_hdr *)skb->cb);
1444 meshhdrlen = ieee80211_get_mesh_hdrlen(prev_meshhdr);
1445 memcpy(&mesh_hdr, prev_meshhdr, meshhdrlen);
1446 sdata->u.sta.mshstats.fwded_frames++;
1447 } else {
1448 if (!sdata->u.sta.mshcfg.dot11MeshTTL) {
1449 /* Do not send frames with mesh_ttl == 0 */
1450 sdata->u.sta.mshstats.dropped_frames_ttl++;
1451 ret = 0;
1452 goto fail;
1453 }
1454 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
1455 sdata);
1456 }
1457 hdrlen = 30;
1458 break;
1459#endif
1392 case IEEE80211_IF_TYPE_STA: 1460 case IEEE80211_IF_TYPE_STA:
1393 fc |= IEEE80211_FCTL_TODS; 1461 fc |= IEEE80211_FCTL_TODS;
1394 /* BSSID SA DA */ 1462 /* BSSID SA DA */
@@ -1409,10 +1477,17 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1409 goto fail; 1477 goto fail;
1410 } 1478 }
1411 1479
1412 sta = sta_info_get(local, hdr.addr1); 1480 /*
1413 if (sta) { 1481 * There's no need to try to look up the destination
1414 sta_flags = sta->flags; 1482 * if it is a multicast address (which can only happen
1415 sta_info_put(sta); 1483 * in AP mode)
1484 */
1485 if (!is_multicast_ether_addr(hdr.addr1)) {
1486 rcu_read_lock();
1487 sta = sta_info_get(local, hdr.addr1);
1488 if (sta)
1489 sta_flags = sta->flags;
1490 rcu_read_unlock();
1416 } 1491 }
1417 1492
1418 /* receiver is QoS enabled, use a QoS type frame */ 1493 /* receiver is QoS enabled, use a QoS type frame */
@@ -1422,12 +1497,12 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1422 } 1497 }
1423 1498
1424 /* 1499 /*
1425 * If port access control is enabled, drop frames to unauthorised 1500 * Drop unicast frames to unauthorised stations unless they are
1426 * stations unless they are EAPOL frames from the local station. 1501 * EAPOL frames from the local station.
1427 */ 1502 */
1428 if (unlikely(sdata->ieee802_1x_pac && 1503 if (unlikely(!is_multicast_ether_addr(hdr.addr1) &&
1429 !(sta_flags & WLAN_STA_AUTHORIZED) && 1504 !(sta_flags & WLAN_STA_AUTHORIZED) &&
1430 !(ethertype == ETH_P_PAE && 1505 !(ethertype == ETH_P_PAE &&
1431 compare_ether_addr(dev->dev_addr, 1506 compare_ether_addr(dev->dev_addr,
1432 skb->data + ETH_ALEN) == 0))) { 1507 skb->data + ETH_ALEN) == 0))) {
1433#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1508#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -1480,7 +1555,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1480 * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and 1555 * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and
1481 * alloc_skb() (net/core/skbuff.c) 1556 * alloc_skb() (net/core/skbuff.c)
1482 */ 1557 */
1483 head_need = hdrlen + encaps_len + local->tx_headroom; 1558 head_need = hdrlen + encaps_len + meshhdrlen + local->tx_headroom;
1484 head_need -= skb_headroom(skb); 1559 head_need -= skb_headroom(skb);
1485 1560
1486 /* We are going to modify skb data, so make a copy of it if happens to 1561 /* We are going to modify skb data, so make a copy of it if happens to
@@ -1514,6 +1589,12 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1514 h_pos += encaps_len; 1589 h_pos += encaps_len;
1515 } 1590 }
1516 1591
1592 if (meshhdrlen > 0) {
1593 memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen);
1594 nh_pos += meshhdrlen;
1595 h_pos += meshhdrlen;
1596 }
1597
1517 if (fc & IEEE80211_STYPE_QOS_DATA) { 1598 if (fc & IEEE80211_STYPE_QOS_DATA) {
1518 __le16 *qos_control; 1599 __le16 *qos_control;
1519 1600
@@ -1583,7 +1664,7 @@ void ieee80211_tx_pending(unsigned long data)
1583 struct ieee80211_local *local = (struct ieee80211_local *)data; 1664 struct ieee80211_local *local = (struct ieee80211_local *)data;
1584 struct net_device *dev = local->mdev; 1665 struct net_device *dev = local->mdev;
1585 struct ieee80211_tx_stored_packet *store; 1666 struct ieee80211_tx_stored_packet *store;
1586 struct ieee80211_txrx_data tx; 1667 struct ieee80211_tx_data tx;
1587 int i, ret, reschedule = 0; 1668 int i, ret, reschedule = 0;
1588 1669
1589 netif_tx_lock_bh(dev); 1670 netif_tx_lock_bh(dev);
@@ -1595,14 +1676,13 @@ void ieee80211_tx_pending(unsigned long data)
1595 continue; 1676 continue;
1596 } 1677 }
1597 store = &local->pending_packet[i]; 1678 store = &local->pending_packet[i];
1598 tx.u.tx.control = &store->control; 1679 tx.control = &store->control;
1599 tx.u.tx.extra_frag = store->extra_frag; 1680 tx.extra_frag = store->extra_frag;
1600 tx.u.tx.num_extra_frag = store->num_extra_frag; 1681 tx.num_extra_frag = store->num_extra_frag;
1601 tx.u.tx.last_frag_hwrate = store->last_frag_hwrate; 1682 tx.last_frag_rate = store->last_frag_rate;
1602 tx.u.tx.last_frag_rate = store->last_frag_rate;
1603 tx.flags = 0; 1683 tx.flags = 0;
1604 if (store->last_frag_rate_ctrl_probe) 1684 if (store->last_frag_rate_ctrl_probe)
1605 tx.flags |= IEEE80211_TXRXD_TXPROBE_LAST_FRAG; 1685 tx.flags |= IEEE80211_TX_PROBE_LAST_FRAG;
1606 ret = __ieee80211_tx(local, store->skb, &tx); 1686 ret = __ieee80211_tx(local, store->skb, &tx);
1607 if (ret) { 1687 if (ret) {
1608 if (ret == IEEE80211_TX_FRAG_AGAIN) 1688 if (ret == IEEE80211_TX_FRAG_AGAIN)
@@ -1636,7 +1716,6 @@ static void ieee80211_beacon_add_tim(struct ieee80211_local *local,
1636 1716
1637 /* Generate bitmap for TIM only if there are any STAs in power save 1717 /* Generate bitmap for TIM only if there are any STAs in power save
1638 * mode. */ 1718 * mode. */
1639 read_lock_bh(&local->sta_lock);
1640 if (atomic_read(&bss->num_sta_ps) > 0) 1719 if (atomic_read(&bss->num_sta_ps) > 0)
1641 /* in the hope that this is faster than 1720 /* in the hope that this is faster than
1642 * checking byte-for-byte */ 1721 * checking byte-for-byte */
@@ -1687,7 +1766,6 @@ static void ieee80211_beacon_add_tim(struct ieee80211_local *local,
1687 *pos++ = aid0; /* Bitmap control */ 1766 *pos++ = aid0; /* Bitmap control */
1688 *pos++ = 0; /* Part Virt Bitmap */ 1767 *pos++ = 0; /* Part Virt Bitmap */
1689 } 1768 }
1690 read_unlock_bh(&local->sta_lock);
1691} 1769}
1692 1770
1693struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, 1771struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
@@ -1701,16 +1779,96 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1701 struct ieee80211_if_ap *ap = NULL; 1779 struct ieee80211_if_ap *ap = NULL;
1702 struct rate_selection rsel; 1780 struct rate_selection rsel;
1703 struct beacon_data *beacon; 1781 struct beacon_data *beacon;
1782 struct ieee80211_supported_band *sband;
1783 struct ieee80211_mgmt *mgmt;
1784 int *num_beacons;
1785 bool err = true;
1786 u8 *pos;
1787
1788 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1704 1789
1705 rcu_read_lock(); 1790 rcu_read_lock();
1706 1791
1707 sdata = vif_to_sdata(vif); 1792 sdata = vif_to_sdata(vif);
1708 bdev = sdata->dev; 1793 bdev = sdata->dev;
1709 ap = &sdata->u.ap;
1710 1794
1711 beacon = rcu_dereference(ap->beacon); 1795 if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
1796 ap = &sdata->u.ap;
1797 beacon = rcu_dereference(ap->beacon);
1798 if (ap && beacon) {
1799 /*
1800 * headroom, head length,
1801 * tail length and maximum TIM length
1802 */
1803 skb = dev_alloc_skb(local->tx_headroom +
1804 beacon->head_len +
1805 beacon->tail_len + 256);
1806 if (!skb)
1807 goto out;
1808
1809 skb_reserve(skb, local->tx_headroom);
1810 memcpy(skb_put(skb, beacon->head_len), beacon->head,
1811 beacon->head_len);
1812
1813 ieee80211_include_sequence(sdata,
1814 (struct ieee80211_hdr *)skb->data);
1815
1816 /*
1817 * Not very nice, but we want to allow the driver to call
1818 * ieee80211_beacon_get() as a response to the set_tim()
1819 * callback. That, however, is already invoked under the
1820 * sta_lock to guarantee consistent and race-free update
1821 * of the tim bitmap in mac80211 and the driver.
1822 */
1823 if (local->tim_in_locked_section) {
1824 ieee80211_beacon_add_tim(local, ap, skb, beacon);
1825 } else {
1826 unsigned long flags;
1827
1828 spin_lock_irqsave(&local->sta_lock, flags);
1829 ieee80211_beacon_add_tim(local, ap, skb, beacon);
1830 spin_unlock_irqrestore(&local->sta_lock, flags);
1831 }
1832
1833 if (beacon->tail)
1834 memcpy(skb_put(skb, beacon->tail_len),
1835 beacon->tail, beacon->tail_len);
1712 1836
1713 if (!ap || sdata->vif.type != IEEE80211_IF_TYPE_AP || !beacon) { 1837 num_beacons = &ap->num_beacons;
1838
1839 err = false;
1840 }
1841 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
1842 /* headroom, head length, tail length and maximum TIM length */
1843 skb = dev_alloc_skb(local->tx_headroom + 400);
1844 if (!skb)
1845 goto out;
1846
1847 skb_reserve(skb, local->hw.extra_tx_headroom);
1848 mgmt = (struct ieee80211_mgmt *)
1849 skb_put(skb, 24 + sizeof(mgmt->u.beacon));
1850 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
1851 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
1852 IEEE80211_STYPE_BEACON);
1853 memset(mgmt->da, 0xff, ETH_ALEN);
1854 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
1855 /* BSSID is left zeroed, wildcard value */
1856 mgmt->u.beacon.beacon_int =
1857 cpu_to_le16(local->hw.conf.beacon_int);
1858 mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */
1859
1860 pos = skb_put(skb, 2);
1861 *pos++ = WLAN_EID_SSID;
1862 *pos++ = 0x0;
1863
1864 mesh_mgmt_ies_add(skb, sdata->dev);
1865
1866 num_beacons = &sdata->u.sta.num_beacons;
1867
1868 err = false;
1869 }
1870
1871 if (err) {
1714#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1872#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1715 if (net_ratelimit()) 1873 if (net_ratelimit())
1716 printk(KERN_DEBUG "no beacon data avail for %s\n", 1874 printk(KERN_DEBUG "no beacon data avail for %s\n",
@@ -1720,27 +1878,8 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1720 goto out; 1878 goto out;
1721 } 1879 }
1722 1880
1723 /* headroom, head length, tail length and maximum TIM length */
1724 skb = dev_alloc_skb(local->tx_headroom + beacon->head_len +
1725 beacon->tail_len + 256);
1726 if (!skb)
1727 goto out;
1728
1729 skb_reserve(skb, local->tx_headroom);
1730 memcpy(skb_put(skb, beacon->head_len), beacon->head,
1731 beacon->head_len);
1732
1733 ieee80211_include_sequence(sdata, (struct ieee80211_hdr *)skb->data);
1734
1735 ieee80211_beacon_add_tim(local, ap, skb, beacon);
1736
1737 if (beacon->tail)
1738 memcpy(skb_put(skb, beacon->tail_len), beacon->tail,
1739 beacon->tail_len);
1740
1741 if (control) { 1881 if (control) {
1742 rate_control_get_rate(local->mdev, local->oper_hw_mode, skb, 1882 rate_control_get_rate(local->mdev, sband, skb, &rsel);
1743 &rsel);
1744 if (!rsel.rate) { 1883 if (!rsel.rate) {
1745 if (net_ratelimit()) { 1884 if (net_ratelimit()) {
1746 printk(KERN_DEBUG "%s: ieee80211_beacon_get: " 1885 printk(KERN_DEBUG "%s: ieee80211_beacon_get: "
@@ -1753,20 +1892,17 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1753 } 1892 }
1754 1893
1755 control->vif = vif; 1894 control->vif = vif;
1756 control->tx_rate = 1895 control->tx_rate = rsel.rate;
1757 (sdata->bss_conf.use_short_preamble && 1896 if (sdata->bss_conf.use_short_preamble &&
1758 (rsel.rate->flags & IEEE80211_RATE_PREAMBLE2)) ? 1897 rsel.rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)
1759 rsel.rate->val2 : rsel.rate->val; 1898 control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE;
1760 control->antenna_sel_tx = local->hw.conf.antenna_sel_tx; 1899 control->antenna_sel_tx = local->hw.conf.antenna_sel_tx;
1761 control->power_level = local->hw.conf.power_level;
1762 control->flags |= IEEE80211_TXCTL_NO_ACK; 1900 control->flags |= IEEE80211_TXCTL_NO_ACK;
1763 control->retry_limit = 1; 1901 control->retry_limit = 1;
1764 control->flags |= IEEE80211_TXCTL_CLEAR_DST_MASK; 1902 control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT;
1765 } 1903 }
1766 1904 (*num_beacons)++;
1767 ap->num_beacons++; 1905out:
1768
1769 out:
1770 rcu_read_unlock(); 1906 rcu_read_unlock();
1771 return skb; 1907 return skb;
1772} 1908}
@@ -1814,8 +1950,8 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
1814 struct sk_buff *skb; 1950 struct sk_buff *skb;
1815 struct sta_info *sta; 1951 struct sta_info *sta;
1816 ieee80211_tx_handler *handler; 1952 ieee80211_tx_handler *handler;
1817 struct ieee80211_txrx_data tx; 1953 struct ieee80211_tx_data tx;
1818 ieee80211_txrx_result res = TXRX_DROP; 1954 ieee80211_tx_result res = TX_DROP;
1819 struct net_device *bdev; 1955 struct net_device *bdev;
1820 struct ieee80211_sub_if_data *sdata; 1956 struct ieee80211_sub_if_data *sdata;
1821 struct ieee80211_if_ap *bss = NULL; 1957 struct ieee80211_if_ap *bss = NULL;
@@ -1836,7 +1972,6 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
1836 rcu_read_unlock(); 1972 rcu_read_unlock();
1837 return NULL; 1973 return NULL;
1838 } 1974 }
1839 rcu_read_unlock();
1840 1975
1841 if (bss->dtim_count != 0) 1976 if (bss->dtim_count != 0)
1842 return NULL; /* send buffered bc/mc only after DTIM beacon */ 1977 return NULL; /* send buffered bc/mc only after DTIM beacon */
@@ -1862,27 +1997,26 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
1862 dev_kfree_skb_any(skb); 1997 dev_kfree_skb_any(skb);
1863 } 1998 }
1864 sta = tx.sta; 1999 sta = tx.sta;
1865 tx.flags |= IEEE80211_TXRXD_TXPS_BUFFERED; 2000 tx.flags |= IEEE80211_TX_PS_BUFFERED;
1866 tx.u.tx.mode = local->hw.conf.mode; 2001 tx.channel = local->hw.conf.channel;
1867 2002
1868 for (handler = local->tx_handlers; *handler != NULL; handler++) { 2003 for (handler = ieee80211_tx_handlers; *handler != NULL; handler++) {
1869 res = (*handler)(&tx); 2004 res = (*handler)(&tx);
1870 if (res == TXRX_DROP || res == TXRX_QUEUED) 2005 if (res == TX_DROP || res == TX_QUEUED)
1871 break; 2006 break;
1872 } 2007 }
1873 skb = tx.skb; /* handlers are allowed to change skb */ 2008 skb = tx.skb; /* handlers are allowed to change skb */
1874 2009
1875 if (res == TXRX_DROP) { 2010 if (res == TX_DROP) {
1876 I802_DEBUG_INC(local->tx_handlers_drop); 2011 I802_DEBUG_INC(local->tx_handlers_drop);
1877 dev_kfree_skb(skb); 2012 dev_kfree_skb(skb);
1878 skb = NULL; 2013 skb = NULL;
1879 } else if (res == TXRX_QUEUED) { 2014 } else if (res == TX_QUEUED) {
1880 I802_DEBUG_INC(local->tx_handlers_queued); 2015 I802_DEBUG_INC(local->tx_handlers_queued);
1881 skb = NULL; 2016 skb = NULL;
1882 } 2017 }
1883 2018
1884 if (sta) 2019 rcu_read_unlock();
1885 sta_info_put(sta);
1886 2020
1887 return skb; 2021 return skb;
1888} 2022}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 5e631ce98d7e..cc9f715c7bfc 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -25,7 +25,8 @@
25#include <net/rtnetlink.h> 25#include <net/rtnetlink.h>
26 26
27#include "ieee80211_i.h" 27#include "ieee80211_i.h"
28#include "ieee80211_rate.h" 28#include "rate.h"
29#include "mesh.h"
29#include "wme.h" 30#include "wme.h"
30 31
31/* privid for wiphys to determine whether they belong to us or not */ 32/* privid for wiphys to determine whether they belong to us or not */
@@ -41,92 +42,6 @@ const unsigned char bridge_tunnel_header[] =
41 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; 42 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
42 43
43 44
44static int rate_list_match(const int *rate_list, int rate)
45{
46 int i;
47
48 if (!rate_list)
49 return 0;
50
51 for (i = 0; rate_list[i] >= 0; i++)
52 if (rate_list[i] == rate)
53 return 1;
54
55 return 0;
56}
57
58void ieee80211_prepare_rates(struct ieee80211_local *local,
59 struct ieee80211_hw_mode *mode)
60{
61 int i;
62
63 for (i = 0; i < mode->num_rates; i++) {
64 struct ieee80211_rate *rate = &mode->rates[i];
65
66 rate->flags &= ~(IEEE80211_RATE_SUPPORTED |
67 IEEE80211_RATE_BASIC);
68
69 if (local->supp_rates[mode->mode]) {
70 if (!rate_list_match(local->supp_rates[mode->mode],
71 rate->rate))
72 continue;
73 }
74
75 rate->flags |= IEEE80211_RATE_SUPPORTED;
76
77 /* Use configured basic rate set if it is available. If not,
78 * use defaults that are sane for most cases. */
79 if (local->basic_rates[mode->mode]) {
80 if (rate_list_match(local->basic_rates[mode->mode],
81 rate->rate))
82 rate->flags |= IEEE80211_RATE_BASIC;
83 } else switch (mode->mode) {
84 case MODE_IEEE80211A:
85 if (rate->rate == 60 || rate->rate == 120 ||
86 rate->rate == 240)
87 rate->flags |= IEEE80211_RATE_BASIC;
88 break;
89 case MODE_IEEE80211B:
90 if (rate->rate == 10 || rate->rate == 20)
91 rate->flags |= IEEE80211_RATE_BASIC;
92 break;
93 case MODE_IEEE80211G:
94 if (rate->rate == 10 || rate->rate == 20 ||
95 rate->rate == 55 || rate->rate == 110)
96 rate->flags |= IEEE80211_RATE_BASIC;
97 break;
98 case NUM_IEEE80211_MODES:
99 /* not useful */
100 break;
101 }
102
103 /* Set ERP and MANDATORY flags based on phymode */
104 switch (mode->mode) {
105 case MODE_IEEE80211A:
106 if (rate->rate == 60 || rate->rate == 120 ||
107 rate->rate == 240)
108 rate->flags |= IEEE80211_RATE_MANDATORY;
109 break;
110 case MODE_IEEE80211B:
111 if (rate->rate == 10)
112 rate->flags |= IEEE80211_RATE_MANDATORY;
113 break;
114 case MODE_IEEE80211G:
115 if (rate->rate == 10 || rate->rate == 20 ||
116 rate->rate == 55 || rate->rate == 110 ||
117 rate->rate == 60 || rate->rate == 120 ||
118 rate->rate == 240)
119 rate->flags |= IEEE80211_RATE_MANDATORY;
120 break;
121 case NUM_IEEE80211_MODES:
122 /* not useful */
123 break;
124 }
125 if (ieee80211_is_erp_rate(mode->mode, rate->rate))
126 rate->flags |= IEEE80211_RATE_ERP;
127 }
128}
129
130u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, 45u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
131 enum ieee80211_if_types type) 46 enum ieee80211_if_types type)
132{ 47{
@@ -232,17 +147,35 @@ int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb)
232} 147}
233EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb); 148EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb);
234 149
235void ieee80211_tx_set_iswep(struct ieee80211_txrx_data *tx) 150int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
151{
152 int ae = meshhdr->flags & IEEE80211S_FLAGS_AE;
153 /* 7.1.3.5a.2 */
154 switch (ae) {
155 case 0:
156 return 5;
157 case 1:
158 return 11;
159 case 2:
160 return 17;
161 case 3:
162 return 23;
163 default:
164 return 5;
165 }
166}
167
168void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
236{ 169{
237 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; 170 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data;
238 171
239 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); 172 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
240 if (tx->u.tx.extra_frag) { 173 if (tx->extra_frag) {
241 struct ieee80211_hdr *fhdr; 174 struct ieee80211_hdr *fhdr;
242 int i; 175 int i;
243 for (i = 0; i < tx->u.tx.num_extra_frag; i++) { 176 for (i = 0; i < tx->num_extra_frag; i++) {
244 fhdr = (struct ieee80211_hdr *) 177 fhdr = (struct ieee80211_hdr *)
245 tx->u.tx.extra_frag[i]->data; 178 tx->extra_frag[i]->data;
246 fhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); 179 fhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
247 } 180 }
248 } 181 }
@@ -262,7 +195,7 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
262 * DIV_ROUND_UP() operations. 195 * DIV_ROUND_UP() operations.
263 */ 196 */
264 197
265 if (local->hw.conf.phymode == MODE_IEEE80211A || erp) { 198 if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ || erp) {
266 /* 199 /*
267 * OFDM: 200 * OFDM:
268 * 201 *
@@ -304,15 +237,19 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
304/* Exported duration function for driver use */ 237/* Exported duration function for driver use */
305__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, 238__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
306 struct ieee80211_vif *vif, 239 struct ieee80211_vif *vif,
307 size_t frame_len, int rate) 240 size_t frame_len,
241 struct ieee80211_rate *rate)
308{ 242{
309 struct ieee80211_local *local = hw_to_local(hw); 243 struct ieee80211_local *local = hw_to_local(hw);
310 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 244 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
311 u16 dur; 245 u16 dur;
312 int erp; 246 int erp;
313 247
314 erp = ieee80211_is_erp_rate(hw->conf.phymode, rate); 248 erp = 0;
315 dur = ieee80211_frame_duration(local, frame_len, rate, erp, 249 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
250 erp = rate->flags & IEEE80211_RATE_ERP_G;
251
252 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, erp,
316 sdata->bss_conf.use_short_preamble); 253 sdata->bss_conf.use_short_preamble);
317 254
318 return cpu_to_le16(dur); 255 return cpu_to_le16(dur);
@@ -332,17 +269,20 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
332 269
333 short_preamble = sdata->bss_conf.use_short_preamble; 270 short_preamble = sdata->bss_conf.use_short_preamble;
334 271
335 rate = frame_txctl->rts_rate; 272 rate = frame_txctl->rts_cts_rate;
336 erp = !!(rate->flags & IEEE80211_RATE_ERP); 273
274 erp = 0;
275 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
276 erp = rate->flags & IEEE80211_RATE_ERP_G;
337 277
338 /* CTS duration */ 278 /* CTS duration */
339 dur = ieee80211_frame_duration(local, 10, rate->rate, 279 dur = ieee80211_frame_duration(local, 10, rate->bitrate,
340 erp, short_preamble); 280 erp, short_preamble);
341 /* Data frame duration */ 281 /* Data frame duration */
342 dur += ieee80211_frame_duration(local, frame_len, rate->rate, 282 dur += ieee80211_frame_duration(local, frame_len, rate->bitrate,
343 erp, short_preamble); 283 erp, short_preamble);
344 /* ACK duration */ 284 /* ACK duration */
345 dur += ieee80211_frame_duration(local, 10, rate->rate, 285 dur += ieee80211_frame_duration(local, 10, rate->bitrate,
346 erp, short_preamble); 286 erp, short_preamble);
347 287
348 return cpu_to_le16(dur); 288 return cpu_to_le16(dur);
@@ -363,15 +303,17 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
363 303
364 short_preamble = sdata->bss_conf.use_short_preamble; 304 short_preamble = sdata->bss_conf.use_short_preamble;
365 305
366 rate = frame_txctl->rts_rate; 306 rate = frame_txctl->rts_cts_rate;
367 erp = !!(rate->flags & IEEE80211_RATE_ERP); 307 erp = 0;
308 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
309 erp = rate->flags & IEEE80211_RATE_ERP_G;
368 310
369 /* Data frame duration */ 311 /* Data frame duration */
370 dur = ieee80211_frame_duration(local, frame_len, rate->rate, 312 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate,
371 erp, short_preamble); 313 erp, short_preamble);
372 if (!(frame_txctl->flags & IEEE80211_TXCTL_NO_ACK)) { 314 if (!(frame_txctl->flags & IEEE80211_TXCTL_NO_ACK)) {
373 /* ACK duration */ 315 /* ACK duration */
374 dur += ieee80211_frame_duration(local, 10, rate->rate, 316 dur += ieee80211_frame_duration(local, 10, rate->bitrate,
375 erp, short_preamble); 317 erp, short_preamble);
376 } 318 }
377 319
@@ -379,27 +321,6 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
379} 321}
380EXPORT_SYMBOL(ieee80211_ctstoself_duration); 322EXPORT_SYMBOL(ieee80211_ctstoself_duration);
381 323
382struct ieee80211_rate *
383ieee80211_get_rate(struct ieee80211_local *local, int phymode, int hw_rate)
384{
385 struct ieee80211_hw_mode *mode;
386 int r;
387
388 list_for_each_entry(mode, &local->modes_list, list) {
389 if (mode->mode != phymode)
390 continue;
391 for (r = 0; r < mode->num_rates; r++) {
392 struct ieee80211_rate *rate = &mode->rates[r];
393 if (rate->val == hw_rate ||
394 (rate->flags & IEEE80211_RATE_PREAMBLE2 &&
395 rate->val2 == hw_rate))
396 return rate;
397 }
398 }
399
400 return NULL;
401}
402
403void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue) 324void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue)
404{ 325{
405 struct ieee80211_local *local = hw_to_local(hw); 326 struct ieee80211_local *local = hw_to_local(hw);
@@ -480,6 +401,7 @@ void ieee80211_iterate_active_interfaces(
480 case IEEE80211_IF_TYPE_STA: 401 case IEEE80211_IF_TYPE_STA:
481 case IEEE80211_IF_TYPE_IBSS: 402 case IEEE80211_IF_TYPE_IBSS:
482 case IEEE80211_IF_TYPE_WDS: 403 case IEEE80211_IF_TYPE_WDS:
404 case IEEE80211_IF_TYPE_MESH_POINT:
483 break; 405 break;
484 } 406 }
485 if (sdata->dev == local->mdev) 407 if (sdata->dev == local->mdev)
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index a0cff72a580b..affcecd78c10 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -305,39 +305,39 @@ u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key)
305 return NULL; 305 return NULL;
306} 306}
307 307
308ieee80211_txrx_result 308ieee80211_rx_result
309ieee80211_crypto_wep_decrypt(struct ieee80211_txrx_data *rx) 309ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
310{ 310{
311 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA && 311 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA &&
312 ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || 312 ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT ||
313 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH)) 313 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH))
314 return TXRX_CONTINUE; 314 return RX_CONTINUE;
315 315
316 if (!(rx->u.rx.status->flag & RX_FLAG_DECRYPTED)) { 316 if (!(rx->status->flag & RX_FLAG_DECRYPTED)) {
317 if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) { 317 if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) {
318#ifdef CONFIG_MAC80211_DEBUG 318#ifdef CONFIG_MAC80211_DEBUG
319 if (net_ratelimit()) 319 if (net_ratelimit())
320 printk(KERN_DEBUG "%s: RX WEP frame, decrypt " 320 printk(KERN_DEBUG "%s: RX WEP frame, decrypt "
321 "failed\n", rx->dev->name); 321 "failed\n", rx->dev->name);
322#endif /* CONFIG_MAC80211_DEBUG */ 322#endif /* CONFIG_MAC80211_DEBUG */
323 return TXRX_DROP; 323 return RX_DROP_UNUSABLE;
324 } 324 }
325 } else if (!(rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED)) { 325 } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) {
326 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); 326 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
327 /* remove ICV */ 327 /* remove ICV */
328 skb_trim(rx->skb, rx->skb->len - 4); 328 skb_trim(rx->skb, rx->skb->len - 4);
329 } 329 }
330 330
331 return TXRX_CONTINUE; 331 return RX_CONTINUE;
332} 332}
333 333
334static int wep_encrypt_skb(struct ieee80211_txrx_data *tx, struct sk_buff *skb) 334static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
335{ 335{
336 if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) { 336 if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) {
337 if (ieee80211_wep_encrypt(tx->local, skb, tx->key)) 337 if (ieee80211_wep_encrypt(tx->local, skb, tx->key))
338 return -1; 338 return -1;
339 } else { 339 } else {
340 tx->u.tx.control->key_idx = tx->key->conf.hw_key_idx; 340 tx->control->key_idx = tx->key->conf.hw_key_idx;
341 if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) { 341 if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) {
342 if (!ieee80211_wep_add_iv(tx->local, skb, tx->key)) 342 if (!ieee80211_wep_add_iv(tx->local, skb, tx->key))
343 return -1; 343 return -1;
@@ -346,28 +346,28 @@ static int wep_encrypt_skb(struct ieee80211_txrx_data *tx, struct sk_buff *skb)
346 return 0; 346 return 0;
347} 347}
348 348
349ieee80211_txrx_result 349ieee80211_tx_result
350ieee80211_crypto_wep_encrypt(struct ieee80211_txrx_data *tx) 350ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx)
351{ 351{
352 tx->u.tx.control->iv_len = WEP_IV_LEN; 352 tx->control->iv_len = WEP_IV_LEN;
353 tx->u.tx.control->icv_len = WEP_ICV_LEN; 353 tx->control->icv_len = WEP_ICV_LEN;
354 ieee80211_tx_set_iswep(tx); 354 ieee80211_tx_set_protected(tx);
355 355
356 if (wep_encrypt_skb(tx, tx->skb) < 0) { 356 if (wep_encrypt_skb(tx, tx->skb) < 0) {
357 I802_DEBUG_INC(tx->local->tx_handlers_drop_wep); 357 I802_DEBUG_INC(tx->local->tx_handlers_drop_wep);
358 return TXRX_DROP; 358 return TX_DROP;
359 } 359 }
360 360
361 if (tx->u.tx.extra_frag) { 361 if (tx->extra_frag) {
362 int i; 362 int i;
363 for (i = 0; i < tx->u.tx.num_extra_frag; i++) { 363 for (i = 0; i < tx->num_extra_frag; i++) {
364 if (wep_encrypt_skb(tx, tx->u.tx.extra_frag[i]) < 0) { 364 if (wep_encrypt_skb(tx, tx->extra_frag[i]) < 0) {
365 I802_DEBUG_INC(tx->local-> 365 I802_DEBUG_INC(tx->local->
366 tx_handlers_drop_wep); 366 tx_handlers_drop_wep);
367 return TXRX_DROP; 367 return TX_DROP;
368 } 368 }
369 } 369 }
370 } 370 }
371 371
372 return TXRX_CONTINUE; 372 return TX_CONTINUE;
373} 373}
diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h
index 785fbb4e0dd7..363779c50658 100644
--- a/net/mac80211/wep.h
+++ b/net/mac80211/wep.h
@@ -14,7 +14,7 @@
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/types.h> 15#include <linux/types.h>
16#include "ieee80211_i.h" 16#include "ieee80211_i.h"
17#include "ieee80211_key.h" 17#include "key.h"
18 18
19int ieee80211_wep_init(struct ieee80211_local *local); 19int ieee80211_wep_init(struct ieee80211_local *local);
20void ieee80211_wep_free(struct ieee80211_local *local); 20void ieee80211_wep_free(struct ieee80211_local *local);
@@ -28,9 +28,9 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb,
28 struct ieee80211_key *key); 28 struct ieee80211_key *key);
29u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key); 29u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key);
30 30
31ieee80211_txrx_result 31ieee80211_rx_result
32ieee80211_crypto_wep_decrypt(struct ieee80211_txrx_data *rx); 32ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx);
33ieee80211_txrx_result 33ieee80211_tx_result
34ieee80211_crypto_wep_encrypt(struct ieee80211_txrx_data *tx); 34ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx);
35 35
36#endif /* WEP_H */ 36#endif /* WEP_H */
diff --git a/net/mac80211/ieee80211_ioctl.c b/net/mac80211/wext.c
index 5024d3733834..76e1de1dc735 100644
--- a/net/mac80211/ieee80211_ioctl.c
+++ b/net/mac80211/wext.c
@@ -21,8 +21,8 @@
21 21
22#include <net/mac80211.h> 22#include <net/mac80211.h>
23#include "ieee80211_i.h" 23#include "ieee80211_i.h"
24#include "ieee80211_led.h" 24#include "led.h"
25#include "ieee80211_rate.h" 25#include "rate.h"
26#include "wpa.h" 26#include "wpa.h"
27#include "aes_ccm.h" 27#include "aes_ccm.h"
28 28
@@ -33,10 +33,10 @@ static int ieee80211_set_encryption(struct net_device *dev, u8 *sta_addr,
33 size_t key_len) 33 size_t key_len)
34{ 34{
35 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 35 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
36 int ret = 0;
37 struct sta_info *sta; 36 struct sta_info *sta;
38 struct ieee80211_key *key; 37 struct ieee80211_key *key;
39 struct ieee80211_sub_if_data *sdata; 38 struct ieee80211_sub_if_data *sdata;
39 int err;
40 40
41 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 41 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
42 42
@@ -46,59 +46,65 @@ static int ieee80211_set_encryption(struct net_device *dev, u8 *sta_addr,
46 return -EINVAL; 46 return -EINVAL;
47 } 47 }
48 48
49 if (is_broadcast_ether_addr(sta_addr)) { 49 if (remove) {
50 sta = NULL; 50 rcu_read_lock();
51 key = sdata->keys[idx];
52 } else {
53 set_tx_key = 0;
54 /*
55 * According to the standard, the key index of a pairwise
56 * key must be zero. However, some AP are broken when it
57 * comes to WEP key indices, so we work around this.
58 */
59 if (idx != 0 && alg != ALG_WEP) {
60 printk(KERN_DEBUG "%s: set_encrypt - non-zero idx for "
61 "individual key\n", dev->name);
62 return -EINVAL;
63 }
64 51
65 sta = sta_info_get(local, sta_addr); 52 err = 0;
66 if (!sta) {
67#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
68 DECLARE_MAC_BUF(mac);
69 printk(KERN_DEBUG "%s: set_encrypt - unknown addr "
70 "%s\n",
71 dev->name, print_mac(mac, sta_addr));
72#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
73 53
74 return -ENOENT; 54 if (is_broadcast_ether_addr(sta_addr)) {
55 key = sdata->keys[idx];
56 } else {
57 sta = sta_info_get(local, sta_addr);
58 if (!sta) {
59 err = -ENOENT;
60 goto out_unlock;
61 }
62 key = sta->key;
75 } 63 }
76 64
77 key = sta->key;
78 }
79
80 if (remove) {
81 ieee80211_key_free(key); 65 ieee80211_key_free(key);
82 key = NULL;
83 } else { 66 } else {
84 /* 67 key = ieee80211_key_alloc(alg, idx, key_len, _key);
85 * Automatically frees any old key if present. 68 if (!key)
86 */ 69 return -ENOMEM;
87 key = ieee80211_key_alloc(sdata, sta, alg, idx, key_len, _key); 70
88 if (!key) { 71 sta = NULL;
89 ret = -ENOMEM; 72 err = 0;
90 goto err_out; 73
74 rcu_read_lock();
75
76 if (!is_broadcast_ether_addr(sta_addr)) {
77 set_tx_key = 0;
78 /*
79 * According to the standard, the key index of a
80 * pairwise key must be zero. However, some AP are
81 * broken when it comes to WEP key indices, so we
82 * work around this.
83 */
84 if (idx != 0 && alg != ALG_WEP) {
85 ieee80211_key_free(key);
86 err = -EINVAL;
87 goto out_unlock;
88 }
89
90 sta = sta_info_get(local, sta_addr);
91 if (!sta) {
92 ieee80211_key_free(key);
93 err = -ENOENT;
94 goto out_unlock;
95 }
91 } 96 }
97
98 ieee80211_key_link(key, sdata, sta);
99
100 if (set_tx_key || (!sta && !sdata->default_key && key))
101 ieee80211_set_default_key(sdata, idx);
92 } 102 }
93 103
94 if (set_tx_key || (!sta && !sdata->default_key && key)) 104 out_unlock:
95 ieee80211_set_default_key(sdata, idx); 105 rcu_read_unlock();
96 106
97 ret = 0; 107 return err;
98 err_out:
99 if (sta)
100 sta_info_put(sta);
101 return ret;
102} 108}
103 109
104static int ieee80211_ioctl_siwgenie(struct net_device *dev, 110static int ieee80211_ioctl_siwgenie(struct net_device *dev,
@@ -129,22 +135,7 @@ static int ieee80211_ioctl_giwname(struct net_device *dev,
129 struct iw_request_info *info, 135 struct iw_request_info *info,
130 char *name, char *extra) 136 char *name, char *extra)
131{ 137{
132 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 138 strcpy(name, "IEEE 802.11");
133
134 switch (local->hw.conf.phymode) {
135 case MODE_IEEE80211A:
136 strcpy(name, "IEEE 802.11a");
137 break;
138 case MODE_IEEE80211B:
139 strcpy(name, "IEEE 802.11b");
140 break;
141 case MODE_IEEE80211G:
142 strcpy(name, "IEEE 802.11g");
143 break;
144 default:
145 strcpy(name, "IEEE 802.11");
146 break;
147 }
148 139
149 return 0; 140 return 0;
150} 141}
@@ -156,7 +147,7 @@ static int ieee80211_ioctl_giwrange(struct net_device *dev,
156{ 147{
157 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 148 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
158 struct iw_range *range = (struct iw_range *) extra; 149 struct iw_range *range = (struct iw_range *) extra;
159 struct ieee80211_hw_mode *mode = NULL; 150 enum ieee80211_band band;
160 int c = 0; 151 int c = 0;
161 152
162 data->length = sizeof(struct iw_range); 153 data->length = sizeof(struct iw_range);
@@ -191,24 +182,27 @@ static int ieee80211_ioctl_giwrange(struct net_device *dev,
191 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | 182 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
192 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; 183 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
193 184
194 list_for_each_entry(mode, &local->modes_list, list) {
195 int i = 0;
196 185
197 if (!(local->enabled_modes & (1 << mode->mode)) || 186 for (band = 0; band < IEEE80211_NUM_BANDS; band ++) {
198 (local->hw_modes & local->enabled_modes & 187 int i;
199 (1 << MODE_IEEE80211G) && mode->mode == MODE_IEEE80211B)) 188 struct ieee80211_supported_band *sband;
189
190 sband = local->hw.wiphy->bands[band];
191
192 if (!sband)
200 continue; 193 continue;
201 194
202 while (i < mode->num_channels && c < IW_MAX_FREQUENCIES) { 195 for (i = 0; i < sband->n_channels && c < IW_MAX_FREQUENCIES; i++) {
203 struct ieee80211_channel *chan = &mode->channels[i]; 196 struct ieee80211_channel *chan = &sband->channels[i];
204 197
205 if (chan->flag & IEEE80211_CHAN_W_SCAN) { 198 if (!(chan->flags & IEEE80211_CHAN_DISABLED)) {
206 range->freq[c].i = chan->chan; 199 range->freq[c].i =
207 range->freq[c].m = chan->freq * 100000; 200 ieee80211_frequency_to_channel(
208 range->freq[c].e = 1; 201 chan->center_freq);
202 range->freq[c].m = chan->center_freq;
203 range->freq[c].e = 6;
209 c++; 204 c++;
210 } 205 }
211 i++;
212 } 206 }
213 } 207 }
214 range->num_channels = c; 208 range->num_channels = c;
@@ -242,6 +236,9 @@ static int ieee80211_ioctl_siwmode(struct net_device *dev,
242 case IW_MODE_ADHOC: 236 case IW_MODE_ADHOC:
243 type = IEEE80211_IF_TYPE_IBSS; 237 type = IEEE80211_IF_TYPE_IBSS;
244 break; 238 break;
239 case IW_MODE_REPEAT:
240 type = IEEE80211_IF_TYPE_WDS;
241 break;
245 case IW_MODE_MONITOR: 242 case IW_MODE_MONITOR:
246 type = IEEE80211_IF_TYPE_MNTR; 243 type = IEEE80211_IF_TYPE_MNTR;
247 break; 244 break;
@@ -294,31 +291,17 @@ static int ieee80211_ioctl_giwmode(struct net_device *dev,
294 return 0; 291 return 0;
295} 292}
296 293
297int ieee80211_set_channel(struct ieee80211_local *local, int channel, int freq) 294int ieee80211_set_freq(struct ieee80211_local *local, int freqMHz)
298{ 295{
299 struct ieee80211_hw_mode *mode;
300 int c, set = 0;
301 int ret = -EINVAL; 296 int ret = -EINVAL;
297 struct ieee80211_channel *chan;
302 298
303 list_for_each_entry(mode, &local->modes_list, list) { 299 chan = ieee80211_get_channel(local->hw.wiphy, freqMHz);
304 if (!(local->enabled_modes & (1 << mode->mode)))
305 continue;
306 for (c = 0; c < mode->num_channels; c++) {
307 struct ieee80211_channel *chan = &mode->channels[c];
308 if (chan->flag & IEEE80211_CHAN_W_SCAN &&
309 ((chan->chan == channel) || (chan->freq == freq))) {
310 local->oper_channel = chan;
311 local->oper_hw_mode = mode;
312 set = 1;
313 break;
314 }
315 }
316 if (set)
317 break;
318 }
319 300
320 if (set) { 301 if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
321 if (local->sta_sw_scanning) 302 local->oper_channel = chan;
303
304 if (local->sta_sw_scanning || local->sta_hw_scanning)
322 ret = 0; 305 ret = 0;
323 else 306 else
324 ret = ieee80211_hw_config(local); 307 ret = ieee80211_hw_config(local);
@@ -347,13 +330,14 @@ static int ieee80211_ioctl_siwfreq(struct net_device *dev,
347 IEEE80211_STA_AUTO_CHANNEL_SEL; 330 IEEE80211_STA_AUTO_CHANNEL_SEL;
348 return 0; 331 return 0;
349 } else 332 } else
350 return ieee80211_set_channel(local, freq->m, -1); 333 return ieee80211_set_freq(local,
334 ieee80211_channel_to_frequency(freq->m));
351 } else { 335 } else {
352 int i, div = 1000000; 336 int i, div = 1000000;
353 for (i = 0; i < freq->e; i++) 337 for (i = 0; i < freq->e; i++)
354 div /= 10; 338 div /= 10;
355 if (div > 0) 339 if (div > 0)
356 return ieee80211_set_channel(local, -1, freq->m / div); 340 return ieee80211_set_freq(local, freq->m / div);
357 else 341 else
358 return -EINVAL; 342 return -EINVAL;
359 } 343 }
@@ -366,10 +350,7 @@ static int ieee80211_ioctl_giwfreq(struct net_device *dev,
366{ 350{
367 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 351 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
368 352
369 /* TODO: in station mode (Managed/Ad-hoc) might need to poll low-level 353 freq->m = local->hw.conf.channel->center_freq;
370 * driver for the current channel with firmware-based management */
371
372 freq->m = local->hw.conf.freq;
373 freq->e = 6; 354 freq->e = 6;
374 355
375 return 0; 356 return 0;
@@ -480,10 +461,20 @@ static int ieee80211_ioctl_siwap(struct net_device *dev,
480 ieee80211_sta_req_auth(dev, &sdata->u.sta); 461 ieee80211_sta_req_auth(dev, &sdata->u.sta);
481 return 0; 462 return 0;
482 } else if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) { 463 } else if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) {
483 if (memcmp(sdata->u.wds.remote_addr, (u8 *) &ap_addr->sa_data, 464 /*
484 ETH_ALEN) == 0) 465 * If it is necessary to update the WDS peer address
485 return 0; 466 * while the interface is running, then we need to do
486 return ieee80211_if_update_wds(dev, (u8 *) &ap_addr->sa_data); 467 * more work here, namely if it is running we need to
468 * add a new and remove the old STA entry, this is
469 * normally handled by _open() and _stop().
470 */
471 if (netif_running(dev))
472 return -EBUSY;
473
474 memcpy(&sdata->u.wds.remote_addr, (u8 *) &ap_addr->sa_data,
475 ETH_ALEN);
476
477 return 0;
487 } 478 }
488 479
489 return -EOPNOTSUPP; 480 return -EOPNOTSUPP;
@@ -526,6 +517,7 @@ static int ieee80211_ioctl_siwscan(struct net_device *dev,
526 517
527 if (sdata->vif.type != IEEE80211_IF_TYPE_STA && 518 if (sdata->vif.type != IEEE80211_IF_TYPE_STA &&
528 sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 519 sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
520 sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT &&
529 sdata->vif.type != IEEE80211_IF_TYPE_AP) 521 sdata->vif.type != IEEE80211_IF_TYPE_AP)
530 return -EOPNOTSUPP; 522 return -EOPNOTSUPP;
531 523
@@ -566,15 +558,17 @@ static int ieee80211_ioctl_siwrate(struct net_device *dev,
566 struct iw_param *rate, char *extra) 558 struct iw_param *rate, char *extra)
567{ 559{
568 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 560 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
569 struct ieee80211_hw_mode *mode; 561 int i, err = -EINVAL;
570 int i;
571 u32 target_rate = rate->value / 100000; 562 u32 target_rate = rate->value / 100000;
572 struct ieee80211_sub_if_data *sdata; 563 struct ieee80211_sub_if_data *sdata;
564 struct ieee80211_supported_band *sband;
573 565
574 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 566 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
575 if (!sdata->bss) 567 if (!sdata->bss)
576 return -ENODEV; 568 return -ENODEV;
577 mode = local->oper_hw_mode; 569
570 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
571
578 /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates 572 /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates
579 * target_rate = X, rate->fixed = 1 means only rate X 573 * target_rate = X, rate->fixed = 1 means only rate X
580 * target_rate = X, rate->fixed = 0 means all rates <= X */ 574 * target_rate = X, rate->fixed = 0 means all rates <= X */
@@ -582,18 +576,20 @@ static int ieee80211_ioctl_siwrate(struct net_device *dev,
582 sdata->bss->force_unicast_rateidx = -1; 576 sdata->bss->force_unicast_rateidx = -1;
583 if (rate->value < 0) 577 if (rate->value < 0)
584 return 0; 578 return 0;
585 for (i=0; i < mode->num_rates; i++) { 579
586 struct ieee80211_rate *rates = &mode->rates[i]; 580 for (i=0; i< sband->n_bitrates; i++) {
587 int this_rate = rates->rate; 581 struct ieee80211_rate *brate = &sband->bitrates[i];
582 int this_rate = brate->bitrate;
588 583
589 if (target_rate == this_rate) { 584 if (target_rate == this_rate) {
590 sdata->bss->max_ratectrl_rateidx = i; 585 sdata->bss->max_ratectrl_rateidx = i;
591 if (rate->fixed) 586 if (rate->fixed)
592 sdata->bss->force_unicast_rateidx = i; 587 sdata->bss->force_unicast_rateidx = i;
593 return 0; 588 err = 0;
589 break;
594 } 590 }
595 } 591 }
596 return -EINVAL; 592 return err;
597} 593}
598 594
599static int ieee80211_ioctl_giwrate(struct net_device *dev, 595static int ieee80211_ioctl_giwrate(struct net_device *dev,
@@ -603,19 +599,31 @@ static int ieee80211_ioctl_giwrate(struct net_device *dev,
603 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 599 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
604 struct sta_info *sta; 600 struct sta_info *sta;
605 struct ieee80211_sub_if_data *sdata; 601 struct ieee80211_sub_if_data *sdata;
602 struct ieee80211_supported_band *sband;
606 603
607 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 604 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
608 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) 605
609 sta = sta_info_get(local, sdata->u.sta.bssid); 606 if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
610 else
611 return -EOPNOTSUPP; 607 return -EOPNOTSUPP;
612 if (!sta) 608
613 return -ENODEV; 609 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
614 if (sta->txrate < local->oper_hw_mode->num_rates) 610
615 rate->value = local->oper_hw_mode->rates[sta->txrate].rate * 100000; 611 rcu_read_lock();
612
613 sta = sta_info_get(local, sdata->u.sta.bssid);
614
615 if (sta && sta->txrate_idx < sband->n_bitrates)
616 rate->value = sband->bitrates[sta->txrate_idx].bitrate;
616 else 617 else
617 rate->value = 0; 618 rate->value = 0;
618 sta_info_put(sta); 619
620 rcu_read_unlock();
621
622 if (!sta)
623 return -ENODEV;
624
625 rate->value *= 100000;
626
619 return 0; 627 return 0;
620} 628}
621 629
@@ -625,7 +633,7 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
625{ 633{
626 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 634 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
627 bool need_reconfig = 0; 635 bool need_reconfig = 0;
628 u8 new_power_level; 636 int new_power_level;
629 637
630 if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) 638 if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
631 return -EINVAL; 639 return -EINVAL;
@@ -635,13 +643,15 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
635 if (data->txpower.fixed) { 643 if (data->txpower.fixed) {
636 new_power_level = data->txpower.value; 644 new_power_level = data->txpower.value;
637 } else { 645 } else {
638 /* Automatic power level. Get the px power from the current 646 /*
639 * channel. */ 647 * Automatic power level. Use maximum power for the current
640 struct ieee80211_channel* chan = local->oper_channel; 648 * channel. Should be part of rate control.
649 */
650 struct ieee80211_channel* chan = local->hw.conf.channel;
641 if (!chan) 651 if (!chan)
642 return -EINVAL; 652 return -EINVAL;
643 653
644 new_power_level = chan->power_level; 654 new_power_level = chan->max_power;
645 } 655 }
646 656
647 if (local->hw.conf.power_level != new_power_level) { 657 if (local->hw.conf.power_level != new_power_level) {
@@ -973,6 +983,8 @@ static struct iw_statistics *ieee80211_get_wireless_stats(struct net_device *dev
973 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 983 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
974 struct sta_info *sta = NULL; 984 struct sta_info *sta = NULL;
975 985
986 rcu_read_lock();
987
976 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 988 if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
977 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) 989 sdata->vif.type == IEEE80211_IF_TYPE_IBSS)
978 sta = sta_info_get(local, sdata->u.sta.bssid); 990 sta = sta_info_get(local, sdata->u.sta.bssid);
@@ -988,8 +1000,10 @@ static struct iw_statistics *ieee80211_get_wireless_stats(struct net_device *dev
988 wstats->qual.qual = sta->last_signal; 1000 wstats->qual.qual = sta->last_signal;
989 wstats->qual.noise = sta->last_noise; 1001 wstats->qual.noise = sta->last_noise;
990 wstats->qual.updated = local->wstats_flags; 1002 wstats->qual.updated = local->wstats_flags;
991 sta_info_put(sta);
992 } 1003 }
1004
1005 rcu_read_unlock();
1006
993 return wstats; 1007 return wstats;
994} 1008}
995 1009
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 4e236599dd31..4e94e4026e78 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -19,10 +19,13 @@
19#include "wme.h" 19#include "wme.h"
20 20
21/* maximum number of hardware queues we support. */ 21/* maximum number of hardware queues we support. */
22#define TC_80211_MAX_QUEUES 8 22#define TC_80211_MAX_QUEUES 16
23
24const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
23 25
24struct ieee80211_sched_data 26struct ieee80211_sched_data
25{ 27{
28 unsigned long qdisc_pool[BITS_TO_LONGS(TC_80211_MAX_QUEUES)];
26 struct tcf_proto *filter_list; 29 struct tcf_proto *filter_list;
27 struct Qdisc *queues[TC_80211_MAX_QUEUES]; 30 struct Qdisc *queues[TC_80211_MAX_QUEUES];
28 struct sk_buff_head requeued[TC_80211_MAX_QUEUES]; 31 struct sk_buff_head requeued[TC_80211_MAX_QUEUES];
@@ -98,7 +101,6 @@ static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd)
98 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 101 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
99 unsigned short fc = le16_to_cpu(hdr->frame_control); 102 unsigned short fc = le16_to_cpu(hdr->frame_control);
100 int qos; 103 int qos;
101 const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
102 104
103 /* see if frame is data or non data frame */ 105 /* see if frame is data or non data frame */
104 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) { 106 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) {
@@ -146,9 +148,26 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
146 unsigned short fc = le16_to_cpu(hdr->frame_control); 148 unsigned short fc = le16_to_cpu(hdr->frame_control);
147 struct Qdisc *qdisc; 149 struct Qdisc *qdisc;
148 int err, queue; 150 int err, queue;
151 struct sta_info *sta;
152 u8 tid;
149 153
150 if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) { 154 if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) {
151 skb_queue_tail(&q->requeued[pkt_data->queue], skb); 155 queue = pkt_data->queue;
156 rcu_read_lock();
157 sta = sta_info_get(local, hdr->addr1);
158 tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
159 if (sta) {
160 int ampdu_queue = sta->tid_to_tx_q[tid];
161 if ((ampdu_queue < local->hw.queues) &&
162 test_bit(ampdu_queue, q->qdisc_pool)) {
163 queue = ampdu_queue;
164 pkt_data->flags |= IEEE80211_TXPD_AMPDU;
165 } else {
166 pkt_data->flags &= ~IEEE80211_TXPD_AMPDU;
167 }
168 }
169 rcu_read_unlock();
170 skb_queue_tail(&q->requeued[queue], skb);
152 qd->q.qlen++; 171 qd->q.qlen++;
153 return 0; 172 return 0;
154 } 173 }
@@ -159,14 +178,31 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
159 */ 178 */
160 if (WLAN_FC_IS_QOS_DATA(fc)) { 179 if (WLAN_FC_IS_QOS_DATA(fc)) {
161 u8 *p = skb->data + ieee80211_get_hdrlen(fc) - 2; 180 u8 *p = skb->data + ieee80211_get_hdrlen(fc) - 2;
162 u8 qos_hdr = skb->priority & QOS_CONTROL_TAG1D_MASK; 181 u8 ack_policy = 0;
182 tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
163 if (local->wifi_wme_noack_test) 183 if (local->wifi_wme_noack_test)
164 qos_hdr |= QOS_CONTROL_ACK_POLICY_NOACK << 184 ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
165 QOS_CONTROL_ACK_POLICY_SHIFT; 185 QOS_CONTROL_ACK_POLICY_SHIFT;
166 /* qos header is 2 bytes, second reserved */ 186 /* qos header is 2 bytes, second reserved */
167 *p = qos_hdr; 187 *p = ack_policy | tid;
168 p++; 188 p++;
169 *p = 0; 189 *p = 0;
190
191 rcu_read_lock();
192
193 sta = sta_info_get(local, hdr->addr1);
194 if (sta) {
195 int ampdu_queue = sta->tid_to_tx_q[tid];
196 if ((ampdu_queue < local->hw.queues) &&
197 test_bit(ampdu_queue, q->qdisc_pool)) {
198 queue = ampdu_queue;
199 pkt_data->flags |= IEEE80211_TXPD_AMPDU;
200 } else {
201 pkt_data->flags &= ~IEEE80211_TXPD_AMPDU;
202 }
203 }
204
205 rcu_read_unlock();
170 } 206 }
171 207
172 if (unlikely(queue >= local->hw.queues)) { 208 if (unlikely(queue >= local->hw.queues)) {
@@ -184,6 +220,7 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
184 kfree_skb(skb); 220 kfree_skb(skb);
185 err = NET_XMIT_DROP; 221 err = NET_XMIT_DROP;
186 } else { 222 } else {
223 tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
187 pkt_data->queue = (unsigned int) queue; 224 pkt_data->queue = (unsigned int) queue;
188 qdisc = q->queues[queue]; 225 qdisc = q->queues[queue];
189 err = qdisc->enqueue(skb, qdisc); 226 err = qdisc->enqueue(skb, qdisc);
@@ -235,10 +272,11 @@ static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
235 /* check all the h/w queues in numeric/priority order */ 272 /* check all the h/w queues in numeric/priority order */
236 for (queue = 0; queue < hw->queues; queue++) { 273 for (queue = 0; queue < hw->queues; queue++) {
237 /* see if there is room in this hardware queue */ 274 /* see if there is room in this hardware queue */
238 if (test_bit(IEEE80211_LINK_STATE_XOFF, 275 if ((test_bit(IEEE80211_LINK_STATE_XOFF,
239 &local->state[queue]) || 276 &local->state[queue])) ||
240 test_bit(IEEE80211_LINK_STATE_PENDING, 277 (test_bit(IEEE80211_LINK_STATE_PENDING,
241 &local->state[queue])) 278 &local->state[queue])) ||
279 (!test_bit(queue, q->qdisc_pool)))
242 continue; 280 continue;
243 281
244 /* there is space - try and get a frame */ 282 /* there is space - try and get a frame */
@@ -360,6 +398,10 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
360 } 398 }
361 } 399 }
362 400
401 /* reserve all legacy QoS queues */
402 for (i = 0; i < min(IEEE80211_TX_QUEUE_DATA4, queues); i++)
403 set_bit(i, q->qdisc_pool);
404
363 return err; 405 return err;
364} 406}
365 407
@@ -605,3 +647,80 @@ void ieee80211_wme_unregister(void)
605{ 647{
606 unregister_qdisc(&wme_qdisc_ops); 648 unregister_qdisc(&wme_qdisc_ops);
607} 649}
650
651int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
652 struct sta_info *sta, u16 tid)
653{
654 int i;
655 struct ieee80211_sched_data *q =
656 qdisc_priv(local->mdev->qdisc_sleeping);
657 DECLARE_MAC_BUF(mac);
658
659 /* prepare the filter and save it for the SW queue
660 * matching the recieved HW queue */
661
662 /* try to get a Qdisc from the pool */
663 for (i = IEEE80211_TX_QUEUE_BEACON; i < local->hw.queues; i++)
664 if (!test_and_set_bit(i, q->qdisc_pool)) {
665 ieee80211_stop_queue(local_to_hw(local), i);
666 sta->tid_to_tx_q[tid] = i;
667
668 /* IF there are already pending packets
669 * on this tid first we need to drain them
670 * on the previous queue
671 * since HT is strict in order */
672#ifdef CONFIG_MAC80211_HT_DEBUG
673 if (net_ratelimit())
674 printk(KERN_DEBUG "allocated aggregation queue"
675 " %d tid %d addr %s pool=0x%lX",
676 i, tid, print_mac(mac, sta->addr),
677 q->qdisc_pool[0]);
678#endif /* CONFIG_MAC80211_HT_DEBUG */
679 return 0;
680 }
681
682 return -EAGAIN;
683}
684
685/**
686 * the caller needs to hold local->mdev->queue_lock
687 */
688void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
689 struct sta_info *sta, u16 tid,
690 u8 requeue)
691{
692 struct ieee80211_sched_data *q =
693 qdisc_priv(local->mdev->qdisc_sleeping);
694 int agg_queue = sta->tid_to_tx_q[tid];
695
696 /* return the qdisc to the pool */
697 clear_bit(agg_queue, q->qdisc_pool);
698 sta->tid_to_tx_q[tid] = local->hw.queues;
699
700 if (requeue)
701 ieee80211_requeue(local, agg_queue);
702 else
703 q->queues[agg_queue]->ops->reset(q->queues[agg_queue]);
704}
705
706void ieee80211_requeue(struct ieee80211_local *local, int queue)
707{
708 struct Qdisc *root_qd = local->mdev->qdisc_sleeping;
709 struct ieee80211_sched_data *q = qdisc_priv(root_qd);
710 struct Qdisc *qdisc = q->queues[queue];
711 struct sk_buff *skb = NULL;
712 u32 len = qdisc->q.qlen;
713
714 if (!qdisc || !qdisc->dequeue)
715 return;
716
717 printk(KERN_DEBUG "requeue: qlen = %d\n", qdisc->q.qlen);
718 for (len = qdisc->q.qlen; len > 0; len--) {
719 skb = qdisc->dequeue(qdisc);
720 root_qd->q.qlen--;
721 /* packet will be classified again and */
722 /* skb->packet_data->queue will be overridden if needed */
723 if (skb)
724 wme_qdiscop_enqueue(skb, root_qd);
725 }
726}
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index 76c713a6450c..fcc6b05508cc 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -24,6 +24,8 @@
24 24
25#define QOS_CONTROL_TAG1D_MASK 0x07 25#define QOS_CONTROL_TAG1D_MASK 0x07
26 26
27extern const int ieee802_1d_to_ac[8];
28
27static inline int WLAN_FC_IS_QOS_DATA(u16 fc) 29static inline int WLAN_FC_IS_QOS_DATA(u16 fc)
28{ 30{
29 return (fc & 0x8C) == 0x88; 31 return (fc & 0x8C) == 0x88;
@@ -32,7 +34,12 @@ static inline int WLAN_FC_IS_QOS_DATA(u16 fc)
32#ifdef CONFIG_NET_SCHED 34#ifdef CONFIG_NET_SCHED
33void ieee80211_install_qdisc(struct net_device *dev); 35void ieee80211_install_qdisc(struct net_device *dev);
34int ieee80211_qdisc_installed(struct net_device *dev); 36int ieee80211_qdisc_installed(struct net_device *dev);
35 37int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
38 struct sta_info *sta, u16 tid);
39void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
40 struct sta_info *sta, u16 tid,
41 u8 requeue);
42void ieee80211_requeue(struct ieee80211_local *local, int queue);
36int ieee80211_wme_register(void); 43int ieee80211_wme_register(void);
37void ieee80211_wme_unregister(void); 44void ieee80211_wme_unregister(void);
38#else 45#else
@@ -43,7 +50,19 @@ static inline int ieee80211_qdisc_installed(struct net_device *dev)
43{ 50{
44 return 0; 51 return 0;
45} 52}
46 53static inline int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
54 struct sta_info *sta, u16 tid)
55{
56 return -EAGAIN;
57}
58static inline void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
59 struct sta_info *sta, u16 tid,
60 u8 requeue)
61{
62}
63static inline void ieee80211_requeue(struct ieee80211_local *local, int queue)
64{
65}
47static inline int ieee80211_wme_register(void) 66static inline int ieee80211_wme_register(void)
48{ 67{
49 return 0; 68 return 0;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 6f04311cf0a0..45709ada8fee 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -70,8 +70,8 @@ static int ieee80211_get_hdr_info(const struct sk_buff *skb, u8 **sa, u8 **da,
70} 70}
71 71
72 72
73ieee80211_txrx_result 73ieee80211_tx_result
74ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx) 74ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
75{ 75{
76 u8 *data, *sa, *da, *key, *mic, qos_tid; 76 u8 *data, *sa, *da, *key, *mic, qos_tid;
77 size_t data_len; 77 size_t data_len;
@@ -84,18 +84,18 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx)
84 84
85 if (!tx->key || tx->key->conf.alg != ALG_TKIP || skb->len < 24 || 85 if (!tx->key || tx->key->conf.alg != ALG_TKIP || skb->len < 24 ||
86 !WLAN_FC_DATA_PRESENT(fc)) 86 !WLAN_FC_DATA_PRESENT(fc))
87 return TXRX_CONTINUE; 87 return TX_CONTINUE;
88 88
89 if (ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len)) 89 if (ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len))
90 return TXRX_DROP; 90 return TX_DROP;
91 91
92 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 92 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) &&
93 !(tx->flags & IEEE80211_TXRXD_FRAGMENTED) && 93 !(tx->flags & IEEE80211_TX_FRAGMENTED) &&
94 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) && 94 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) &&
95 !wpa_test) { 95 !wpa_test) {
96 /* hwaccel - with no need for preallocated room for Michael MIC 96 /* hwaccel - with no need for preallocated room for Michael MIC
97 */ 97 */
98 return TXRX_CONTINUE; 98 return TX_CONTINUE;
99 } 99 }
100 100
101 if (skb_tailroom(skb) < MICHAEL_MIC_LEN) { 101 if (skb_tailroom(skb) < MICHAEL_MIC_LEN) {
@@ -105,7 +105,7 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx)
105 GFP_ATOMIC))) { 105 GFP_ATOMIC))) {
106 printk(KERN_DEBUG "%s: failed to allocate more memory " 106 printk(KERN_DEBUG "%s: failed to allocate more memory "
107 "for Michael MIC\n", tx->dev->name); 107 "for Michael MIC\n", tx->dev->name);
108 return TXRX_DROP; 108 return TX_DROP;
109 } 109 }
110 } 110 }
111 111
@@ -119,12 +119,12 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx)
119 mic = skb_put(skb, MICHAEL_MIC_LEN); 119 mic = skb_put(skb, MICHAEL_MIC_LEN);
120 michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic); 120 michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic);
121 121
122 return TXRX_CONTINUE; 122 return TX_CONTINUE;
123} 123}
124 124
125 125
126ieee80211_txrx_result 126ieee80211_rx_result
127ieee80211_rx_h_michael_mic_verify(struct ieee80211_txrx_data *rx) 127ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
128{ 128{
129 u8 *data, *sa, *da, *key = NULL, qos_tid; 129 u8 *data, *sa, *da, *key = NULL, qos_tid;
130 size_t data_len; 130 size_t data_len;
@@ -139,16 +139,16 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_txrx_data *rx)
139 /* 139 /*
140 * No way to verify the MIC if the hardware stripped it 140 * No way to verify the MIC if the hardware stripped it
141 */ 141 */
142 if (rx->u.rx.status->flag & RX_FLAG_MMIC_STRIPPED) 142 if (rx->status->flag & RX_FLAG_MMIC_STRIPPED)
143 return TXRX_CONTINUE; 143 return RX_CONTINUE;
144 144
145 if (!rx->key || rx->key->conf.alg != ALG_TKIP || 145 if (!rx->key || rx->key->conf.alg != ALG_TKIP ||
146 !(rx->fc & IEEE80211_FCTL_PROTECTED) || !WLAN_FC_DATA_PRESENT(fc)) 146 !(rx->fc & IEEE80211_FCTL_PROTECTED) || !WLAN_FC_DATA_PRESENT(fc))
147 return TXRX_CONTINUE; 147 return RX_CONTINUE;
148 148
149 if (ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len) 149 if (ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len)
150 || data_len < MICHAEL_MIC_LEN) 150 || data_len < MICHAEL_MIC_LEN)
151 return TXRX_DROP; 151 return RX_DROP_UNUSABLE;
152 152
153 data_len -= MICHAEL_MIC_LEN; 153 data_len -= MICHAEL_MIC_LEN;
154 154
@@ -161,29 +161,29 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_txrx_data *rx)
161 ALG_TKIP_TEMP_AUTH_TX_MIC_KEY]; 161 ALG_TKIP_TEMP_AUTH_TX_MIC_KEY];
162 michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic); 162 michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic);
163 if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) { 163 if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) {
164 if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) 164 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
165 return TXRX_DROP; 165 return RX_DROP_UNUSABLE;
166 166
167 printk(KERN_DEBUG "%s: invalid Michael MIC in data frame from " 167 printk(KERN_DEBUG "%s: invalid Michael MIC in data frame from "
168 "%s\n", rx->dev->name, print_mac(mac, sa)); 168 "%s\n", rx->dev->name, print_mac(mac, sa));
169 169
170 mac80211_ev_michael_mic_failure(rx->dev, rx->key->conf.keyidx, 170 mac80211_ev_michael_mic_failure(rx->dev, rx->key->conf.keyidx,
171 (void *) skb->data); 171 (void *) skb->data);
172 return TXRX_DROP; 172 return RX_DROP_UNUSABLE;
173 } 173 }
174 174
175 /* remove Michael MIC from payload */ 175 /* remove Michael MIC from payload */
176 skb_trim(skb, skb->len - MICHAEL_MIC_LEN); 176 skb_trim(skb, skb->len - MICHAEL_MIC_LEN);
177 177
178 /* update IV in key information to be able to detect replays */ 178 /* update IV in key information to be able to detect replays */
179 rx->key->u.tkip.iv32_rx[rx->u.rx.queue] = rx->u.rx.tkip_iv32; 179 rx->key->u.tkip.iv32_rx[rx->queue] = rx->tkip_iv32;
180 rx->key->u.tkip.iv16_rx[rx->u.rx.queue] = rx->u.rx.tkip_iv16; 180 rx->key->u.tkip.iv16_rx[rx->queue] = rx->tkip_iv16;
181 181
182 return TXRX_CONTINUE; 182 return RX_CONTINUE;
183} 183}
184 184
185 185
186static int tkip_encrypt_skb(struct ieee80211_txrx_data *tx, 186static int tkip_encrypt_skb(struct ieee80211_tx_data *tx,
187 struct sk_buff *skb, int test) 187 struct sk_buff *skb, int test)
188{ 188{
189 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 189 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -228,7 +228,7 @@ static int tkip_encrypt_skb(struct ieee80211_txrx_data *tx,
228 0x7f), 228 0x7f),
229 (u8) key->u.tkip.iv16); 229 (u8) key->u.tkip.iv16);
230 230
231 tx->u.tx.control->key_idx = tx->key->conf.hw_key_idx; 231 tx->control->key_idx = tx->key->conf.hw_key_idx;
232 return 0; 232 return 0;
233 } 233 }
234 234
@@ -242,42 +242,42 @@ static int tkip_encrypt_skb(struct ieee80211_txrx_data *tx,
242} 242}
243 243
244 244
245ieee80211_txrx_result 245ieee80211_tx_result
246ieee80211_crypto_tkip_encrypt(struct ieee80211_txrx_data *tx) 246ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx)
247{ 247{
248 struct sk_buff *skb = tx->skb; 248 struct sk_buff *skb = tx->skb;
249 int wpa_test = 0, test = 0; 249 int wpa_test = 0, test = 0;
250 250
251 tx->u.tx.control->icv_len = TKIP_ICV_LEN; 251 tx->control->icv_len = TKIP_ICV_LEN;
252 tx->u.tx.control->iv_len = TKIP_IV_LEN; 252 tx->control->iv_len = TKIP_IV_LEN;
253 ieee80211_tx_set_iswep(tx); 253 ieee80211_tx_set_protected(tx);
254 254
255 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 255 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) &&
256 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) && 256 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
257 !wpa_test) { 257 !wpa_test) {
258 /* hwaccel - with no need for preallocated room for IV/ICV */ 258 /* hwaccel - with no need for preallocated room for IV/ICV */
259 tx->u.tx.control->key_idx = tx->key->conf.hw_key_idx; 259 tx->control->key_idx = tx->key->conf.hw_key_idx;
260 return TXRX_CONTINUE; 260 return TX_CONTINUE;
261 } 261 }
262 262
263 if (tkip_encrypt_skb(tx, skb, test) < 0) 263 if (tkip_encrypt_skb(tx, skb, test) < 0)
264 return TXRX_DROP; 264 return TX_DROP;
265 265
266 if (tx->u.tx.extra_frag) { 266 if (tx->extra_frag) {
267 int i; 267 int i;
268 for (i = 0; i < tx->u.tx.num_extra_frag; i++) { 268 for (i = 0; i < tx->num_extra_frag; i++) {
269 if (tkip_encrypt_skb(tx, tx->u.tx.extra_frag[i], test) 269 if (tkip_encrypt_skb(tx, tx->extra_frag[i], test)
270 < 0) 270 < 0)
271 return TXRX_DROP; 271 return TX_DROP;
272 } 272 }
273 } 273 }
274 274
275 return TXRX_CONTINUE; 275 return TX_CONTINUE;
276} 276}
277 277
278 278
279ieee80211_txrx_result 279ieee80211_rx_result
280ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx) 280ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
281{ 281{
282 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 282 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
283 u16 fc; 283 u16 fc;
@@ -290,19 +290,19 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx)
290 hdrlen = ieee80211_get_hdrlen(fc); 290 hdrlen = ieee80211_get_hdrlen(fc);
291 291
292 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) 292 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)
293 return TXRX_CONTINUE; 293 return RX_CONTINUE;
294 294
295 if (!rx->sta || skb->len - hdrlen < 12) 295 if (!rx->sta || skb->len - hdrlen < 12)
296 return TXRX_DROP; 296 return RX_DROP_UNUSABLE;
297 297
298 if (rx->u.rx.status->flag & RX_FLAG_DECRYPTED) { 298 if (rx->status->flag & RX_FLAG_DECRYPTED) {
299 if (rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED) { 299 if (rx->status->flag & RX_FLAG_IV_STRIPPED) {
300 /* 300 /*
301 * Hardware took care of all processing, including 301 * Hardware took care of all processing, including
302 * replay protection, and stripped the ICV/IV so 302 * replay protection, and stripped the ICV/IV so
303 * we cannot do any checks here. 303 * we cannot do any checks here.
304 */ 304 */
305 return TXRX_CONTINUE; 305 return RX_CONTINUE;
306 } 306 }
307 307
308 /* let TKIP code verify IV, but skip decryption */ 308 /* let TKIP code verify IV, but skip decryption */
@@ -312,9 +312,9 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx)
312 res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, 312 res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
313 key, skb->data + hdrlen, 313 key, skb->data + hdrlen,
314 skb->len - hdrlen, rx->sta->addr, 314 skb->len - hdrlen, rx->sta->addr,
315 hwaccel, rx->u.rx.queue, 315 hdr->addr1, hwaccel, rx->queue,
316 &rx->u.rx.tkip_iv32, 316 &rx->tkip_iv32,
317 &rx->u.rx.tkip_iv16); 317 &rx->tkip_iv16);
318 if (res != TKIP_DECRYPT_OK || wpa_test) { 318 if (res != TKIP_DECRYPT_OK || wpa_test) {
319#ifdef CONFIG_MAC80211_DEBUG 319#ifdef CONFIG_MAC80211_DEBUG
320 if (net_ratelimit()) 320 if (net_ratelimit())
@@ -322,7 +322,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx)
322 "frame from %s (res=%d)\n", rx->dev->name, 322 "frame from %s (res=%d)\n", rx->dev->name,
323 print_mac(mac, rx->sta->addr), res); 323 print_mac(mac, rx->sta->addr), res);
324#endif /* CONFIG_MAC80211_DEBUG */ 324#endif /* CONFIG_MAC80211_DEBUG */
325 return TXRX_DROP; 325 return RX_DROP_UNUSABLE;
326 } 326 }
327 327
328 /* Trim ICV */ 328 /* Trim ICV */
@@ -332,7 +332,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx)
332 memmove(skb->data + TKIP_IV_LEN, skb->data, hdrlen); 332 memmove(skb->data + TKIP_IV_LEN, skb->data, hdrlen);
333 skb_pull(skb, TKIP_IV_LEN); 333 skb_pull(skb, TKIP_IV_LEN);
334 334
335 return TXRX_CONTINUE; 335 return RX_CONTINUE;
336} 336}
337 337
338 338
@@ -429,7 +429,7 @@ static inline int ccmp_hdr2pn(u8 *pn, u8 *hdr)
429} 429}
430 430
431 431
432static int ccmp_encrypt_skb(struct ieee80211_txrx_data *tx, 432static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx,
433 struct sk_buff *skb, int test) 433 struct sk_buff *skb, int test)
434{ 434{
435 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 435 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -478,7 +478,7 @@ static int ccmp_encrypt_skb(struct ieee80211_txrx_data *tx,
478 478
479 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 479 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
480 /* hwaccel - with preallocated room for CCMP header */ 480 /* hwaccel - with preallocated room for CCMP header */
481 tx->u.tx.control->key_idx = key->conf.hw_key_idx; 481 tx->control->key_idx = key->conf.hw_key_idx;
482 return 0; 482 return 0;
483 } 483 }
484 484
@@ -491,42 +491,42 @@ static int ccmp_encrypt_skb(struct ieee80211_txrx_data *tx,
491} 491}
492 492
493 493
494ieee80211_txrx_result 494ieee80211_tx_result
495ieee80211_crypto_ccmp_encrypt(struct ieee80211_txrx_data *tx) 495ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx)
496{ 496{
497 struct sk_buff *skb = tx->skb; 497 struct sk_buff *skb = tx->skb;
498 int test = 0; 498 int test = 0;
499 499
500 tx->u.tx.control->icv_len = CCMP_MIC_LEN; 500 tx->control->icv_len = CCMP_MIC_LEN;
501 tx->u.tx.control->iv_len = CCMP_HDR_LEN; 501 tx->control->iv_len = CCMP_HDR_LEN;
502 ieee80211_tx_set_iswep(tx); 502 ieee80211_tx_set_protected(tx);
503 503
504 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 504 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) &&
505 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { 505 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
506 /* hwaccel - with no need for preallocated room for CCMP " 506 /* hwaccel - with no need for preallocated room for CCMP "
507 * header or MIC fields */ 507 * header or MIC fields */
508 tx->u.tx.control->key_idx = tx->key->conf.hw_key_idx; 508 tx->control->key_idx = tx->key->conf.hw_key_idx;
509 return TXRX_CONTINUE; 509 return TX_CONTINUE;
510 } 510 }
511 511
512 if (ccmp_encrypt_skb(tx, skb, test) < 0) 512 if (ccmp_encrypt_skb(tx, skb, test) < 0)
513 return TXRX_DROP; 513 return TX_DROP;
514 514
515 if (tx->u.tx.extra_frag) { 515 if (tx->extra_frag) {
516 int i; 516 int i;
517 for (i = 0; i < tx->u.tx.num_extra_frag; i++) { 517 for (i = 0; i < tx->num_extra_frag; i++) {
518 if (ccmp_encrypt_skb(tx, tx->u.tx.extra_frag[i], test) 518 if (ccmp_encrypt_skb(tx, tx->extra_frag[i], test)
519 < 0) 519 < 0)
520 return TXRX_DROP; 520 return TX_DROP;
521 } 521 }
522 } 522 }
523 523
524 return TXRX_CONTINUE; 524 return TX_CONTINUE;
525} 525}
526 526
527 527
528ieee80211_txrx_result 528ieee80211_rx_result
529ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx) 529ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
530{ 530{
531 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 531 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
532 u16 fc; 532 u16 fc;
@@ -541,21 +541,21 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx)
541 hdrlen = ieee80211_get_hdrlen(fc); 541 hdrlen = ieee80211_get_hdrlen(fc);
542 542
543 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) 543 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)
544 return TXRX_CONTINUE; 544 return RX_CONTINUE;
545 545
546 data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN; 546 data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN;
547 if (!rx->sta || data_len < 0) 547 if (!rx->sta || data_len < 0)
548 return TXRX_DROP; 548 return RX_DROP_UNUSABLE;
549 549
550 if ((rx->u.rx.status->flag & RX_FLAG_DECRYPTED) && 550 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
551 (rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED)) 551 (rx->status->flag & RX_FLAG_IV_STRIPPED))
552 return TXRX_CONTINUE; 552 return RX_CONTINUE;
553 553
554 (void) ccmp_hdr2pn(pn, skb->data + hdrlen); 554 (void) ccmp_hdr2pn(pn, skb->data + hdrlen);
555 555
556 if (memcmp(pn, key->u.ccmp.rx_pn[rx->u.rx.queue], CCMP_PN_LEN) <= 0) { 556 if (memcmp(pn, key->u.ccmp.rx_pn[rx->queue], CCMP_PN_LEN) <= 0) {
557#ifdef CONFIG_MAC80211_DEBUG 557#ifdef CONFIG_MAC80211_DEBUG
558 u8 *ppn = key->u.ccmp.rx_pn[rx->u.rx.queue]; 558 u8 *ppn = key->u.ccmp.rx_pn[rx->queue];
559 559
560 printk(KERN_DEBUG "%s: CCMP replay detected for RX frame from " 560 printk(KERN_DEBUG "%s: CCMP replay detected for RX frame from "
561 "%s (RX PN %02x%02x%02x%02x%02x%02x <= prev. PN " 561 "%s (RX PN %02x%02x%02x%02x%02x%02x <= prev. PN "
@@ -565,10 +565,10 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx)
565 ppn[0], ppn[1], ppn[2], ppn[3], ppn[4], ppn[5]); 565 ppn[0], ppn[1], ppn[2], ppn[3], ppn[4], ppn[5]);
566#endif /* CONFIG_MAC80211_DEBUG */ 566#endif /* CONFIG_MAC80211_DEBUG */
567 key->u.ccmp.replays++; 567 key->u.ccmp.replays++;
568 return TXRX_DROP; 568 return RX_DROP_UNUSABLE;
569 } 569 }
570 570
571 if (!(rx->u.rx.status->flag & RX_FLAG_DECRYPTED)) { 571 if (!(rx->status->flag & RX_FLAG_DECRYPTED)) {
572 /* hardware didn't decrypt/verify MIC */ 572 /* hardware didn't decrypt/verify MIC */
573 u8 *scratch, *b_0, *aad; 573 u8 *scratch, *b_0, *aad;
574 574
@@ -589,16 +589,16 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx)
589 "for RX frame from %s\n", rx->dev->name, 589 "for RX frame from %s\n", rx->dev->name,
590 print_mac(mac, rx->sta->addr)); 590 print_mac(mac, rx->sta->addr));
591#endif /* CONFIG_MAC80211_DEBUG */ 591#endif /* CONFIG_MAC80211_DEBUG */
592 return TXRX_DROP; 592 return RX_DROP_UNUSABLE;
593 } 593 }
594 } 594 }
595 595
596 memcpy(key->u.ccmp.rx_pn[rx->u.rx.queue], pn, CCMP_PN_LEN); 596 memcpy(key->u.ccmp.rx_pn[rx->queue], pn, CCMP_PN_LEN);
597 597
598 /* Remove CCMP header and MIC */ 598 /* Remove CCMP header and MIC */
599 skb_trim(skb, skb->len - CCMP_MIC_LEN); 599 skb_trim(skb, skb->len - CCMP_MIC_LEN);
600 memmove(skb->data + CCMP_HDR_LEN, skb->data, hdrlen); 600 memmove(skb->data + CCMP_HDR_LEN, skb->data, hdrlen);
601 skb_pull(skb, CCMP_HDR_LEN); 601 skb_pull(skb, CCMP_HDR_LEN);
602 602
603 return TXRX_CONTINUE; 603 return RX_CONTINUE;
604} 604}
diff --git a/net/mac80211/wpa.h b/net/mac80211/wpa.h
index 49d80cf0cd75..d42d221d8a1d 100644
--- a/net/mac80211/wpa.h
+++ b/net/mac80211/wpa.h
@@ -13,19 +13,19 @@
13#include <linux/types.h> 13#include <linux/types.h>
14#include "ieee80211_i.h" 14#include "ieee80211_i.h"
15 15
16ieee80211_txrx_result 16ieee80211_tx_result
17ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx); 17ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx);
18ieee80211_txrx_result 18ieee80211_rx_result
19ieee80211_rx_h_michael_mic_verify(struct ieee80211_txrx_data *rx); 19ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx);
20 20
21ieee80211_txrx_result 21ieee80211_tx_result
22ieee80211_crypto_tkip_encrypt(struct ieee80211_txrx_data *tx); 22ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx);
23ieee80211_txrx_result 23ieee80211_rx_result
24ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx); 24ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx);
25 25
26ieee80211_txrx_result 26ieee80211_tx_result
27ieee80211_crypto_ccmp_encrypt(struct ieee80211_txrx_data *tx); 27ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx);
28ieee80211_txrx_result 28ieee80211_rx_result
29ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx); 29ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx);
30 30
31#endif /* WPA_H */ 31#endif /* WPA_H */
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index daf5b881064d..c1fc0f1a641c 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -86,6 +86,16 @@ config NF_CONNTRACK_EVENTS
86 86
87 If unsure, say `N'. 87 If unsure, say `N'.
88 88
89config NF_CT_PROTO_DCCP
90 tristate 'DCCP protocol connection tracking support (EXPERIMENTAL)'
91 depends on EXPERIMENTAL && NF_CONNTRACK
92 depends on NETFILTER_ADVANCED
93 help
94 With this option enabled, the layer 3 independent connection
95 tracking code will be able to do state tracking on DCCP connections.
96
97 If unsure, say 'N'.
98
89config NF_CT_PROTO_GRE 99config NF_CT_PROTO_GRE
90 tristate 100 tristate
91 depends on NF_CONNTRACK 101 depends on NF_CONNTRACK
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index ea7508387f95..5c4b183f6422 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
13obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o 13obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o
14 14
15# SCTP protocol connection tracking 15# SCTP protocol connection tracking
16obj-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o
16obj-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o 17obj-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o
17obj-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o 18obj-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
18obj-$(CONFIG_NF_CT_PROTO_UDPLITE) += nf_conntrack_proto_udplite.o 19obj-$(CONFIG_NF_CT_PROTO_UDPLITE) += nf_conntrack_proto_udplite.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index c4065b8f9a95..292fa28146fb 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -165,6 +165,14 @@ int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
165 unsigned int verdict; 165 unsigned int verdict;
166 int ret = 0; 166 int ret = 0;
167 167
168#ifdef CONFIG_NET_NS
169 struct net *net;
170
171 net = indev == NULL ? dev_net(outdev) : dev_net(indev);
172 if (net != &init_net)
173 return 1;
174#endif
175
168 /* We may already have this, but read-locks nest anyway */ 176 /* We may already have this, but read-locks nest anyway */
169 rcu_read_lock(); 177 rcu_read_lock();
170 178
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c
index 7b8239c0cd5e..38aedeeaf4e1 100644
--- a/net/netfilter/nf_conntrack_amanda.c
+++ b/net/netfilter/nf_conntrack_amanda.c
@@ -53,7 +53,7 @@ enum amanda_strings {
53}; 53};
54 54
55static struct { 55static struct {
56 char *string; 56 const char *string;
57 size_t len; 57 size_t len;
58 struct ts_config *ts; 58 struct ts_config *ts;
59} search[] __read_mostly = { 59} search[] __read_mostly = {
@@ -91,7 +91,6 @@ static int amanda_help(struct sk_buff *skb,
91 char pbuf[sizeof("65535")], *tmp; 91 char pbuf[sizeof("65535")], *tmp;
92 u_int16_t len; 92 u_int16_t len;
93 __be16 port; 93 __be16 port;
94 int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
95 int ret = NF_ACCEPT; 94 int ret = NF_ACCEPT;
96 typeof(nf_nat_amanda_hook) nf_nat_amanda; 95 typeof(nf_nat_amanda_hook) nf_nat_amanda;
97 96
@@ -148,7 +147,9 @@ static int amanda_help(struct sk_buff *skb,
148 goto out; 147 goto out;
149 } 148 }
150 tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; 149 tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
151 nf_ct_expect_init(exp, family, &tuple->src.u3, &tuple->dst.u3, 150 nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
151 nf_ct_l3num(ct),
152 &tuple->src.u3, &tuple->dst.u3,
152 IPPROTO_TCP, NULL, &port); 153 IPPROTO_TCP, NULL, &port);
153 154
154 nf_nat_amanda = rcu_dereference(nf_nat_amanda_hook); 155 nf_nat_amanda = rcu_dereference(nf_nat_amanda_hook);
@@ -164,26 +165,29 @@ out:
164 return ret; 165 return ret;
165} 166}
166 167
168static const struct nf_conntrack_expect_policy amanda_exp_policy = {
169 .max_expected = 3,
170 .timeout = 180,
171};
172
167static struct nf_conntrack_helper amanda_helper[2] __read_mostly = { 173static struct nf_conntrack_helper amanda_helper[2] __read_mostly = {
168 { 174 {
169 .name = "amanda", 175 .name = "amanda",
170 .max_expected = 3,
171 .timeout = 180,
172 .me = THIS_MODULE, 176 .me = THIS_MODULE,
173 .help = amanda_help, 177 .help = amanda_help,
174 .tuple.src.l3num = AF_INET, 178 .tuple.src.l3num = AF_INET,
175 .tuple.src.u.udp.port = __constant_htons(10080), 179 .tuple.src.u.udp.port = __constant_htons(10080),
176 .tuple.dst.protonum = IPPROTO_UDP, 180 .tuple.dst.protonum = IPPROTO_UDP,
181 .expect_policy = &amanda_exp_policy,
177 }, 182 },
178 { 183 {
179 .name = "amanda", 184 .name = "amanda",
180 .max_expected = 3,
181 .timeout = 180,
182 .me = THIS_MODULE, 185 .me = THIS_MODULE,
183 .help = amanda_help, 186 .help = amanda_help,
184 .tuple.src.l3num = AF_INET6, 187 .tuple.src.l3num = AF_INET6,
185 .tuple.src.u.udp.port = __constant_htons(10080), 188 .tuple.src.u.udp.port = __constant_htons(10080),
186 .tuple.dst.protonum = IPPROTO_UDP, 189 .tuple.dst.protonum = IPPROTO_UDP,
190 .expect_policy = &amanda_exp_policy,
187 }, 191 },
188}; 192};
189 193
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index b77eb56a87e3..4eac65c74ed0 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -94,7 +94,7 @@ static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
94 nf_conntrack_hash_rnd); 94 nf_conntrack_hash_rnd);
95} 95}
96 96
97int 97bool
98nf_ct_get_tuple(const struct sk_buff *skb, 98nf_ct_get_tuple(const struct sk_buff *skb,
99 unsigned int nhoff, 99 unsigned int nhoff,
100 unsigned int dataoff, 100 unsigned int dataoff,
@@ -108,7 +108,7 @@ nf_ct_get_tuple(const struct sk_buff *skb,
108 108
109 tuple->src.l3num = l3num; 109 tuple->src.l3num = l3num;
110 if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0) 110 if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
111 return 0; 111 return false;
112 112
113 tuple->dst.protonum = protonum; 113 tuple->dst.protonum = protonum;
114 tuple->dst.dir = IP_CT_DIR_ORIGINAL; 114 tuple->dst.dir = IP_CT_DIR_ORIGINAL;
@@ -117,10 +117,8 @@ nf_ct_get_tuple(const struct sk_buff *skb,
117} 117}
118EXPORT_SYMBOL_GPL(nf_ct_get_tuple); 118EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
119 119
120int nf_ct_get_tuplepr(const struct sk_buff *skb, 120bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
121 unsigned int nhoff, 121 u_int16_t l3num, struct nf_conntrack_tuple *tuple)
122 u_int16_t l3num,
123 struct nf_conntrack_tuple *tuple)
124{ 122{
125 struct nf_conntrack_l3proto *l3proto; 123 struct nf_conntrack_l3proto *l3proto;
126 struct nf_conntrack_l4proto *l4proto; 124 struct nf_conntrack_l4proto *l4proto;
@@ -134,7 +132,7 @@ int nf_ct_get_tuplepr(const struct sk_buff *skb,
134 ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum); 132 ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
135 if (ret != NF_ACCEPT) { 133 if (ret != NF_ACCEPT) {
136 rcu_read_unlock(); 134 rcu_read_unlock();
137 return 0; 135 return false;
138 } 136 }
139 137
140 l4proto = __nf_ct_l4proto_find(l3num, protonum); 138 l4proto = __nf_ct_l4proto_find(l3num, protonum);
@@ -147,7 +145,7 @@ int nf_ct_get_tuplepr(const struct sk_buff *skb,
147} 145}
148EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr); 146EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
149 147
150int 148bool
151nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, 149nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
152 const struct nf_conntrack_tuple *orig, 150 const struct nf_conntrack_tuple *orig,
153 const struct nf_conntrack_l3proto *l3proto, 151 const struct nf_conntrack_l3proto *l3proto,
@@ -157,7 +155,7 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
157 155
158 inverse->src.l3num = orig->src.l3num; 156 inverse->src.l3num = orig->src.l3num;
159 if (l3proto->invert_tuple(inverse, orig) == 0) 157 if (l3proto->invert_tuple(inverse, orig) == 0)
160 return 0; 158 return false;
161 159
162 inverse->dst.dir = !orig->dst.dir; 160 inverse->dst.dir = !orig->dst.dir;
163 161
@@ -194,8 +192,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
194 * destroy_conntrack() MUST NOT be called with a write lock 192 * destroy_conntrack() MUST NOT be called with a write lock
195 * to nf_conntrack_lock!!! -HW */ 193 * to nf_conntrack_lock!!! -HW */
196 rcu_read_lock(); 194 rcu_read_lock();
197 l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num, 195 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
198 ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
199 if (l4proto && l4proto->destroy) 196 if (l4proto && l4proto->destroy)
200 l4proto->destroy(ct); 197 l4proto->destroy(ct);
201 198
@@ -739,10 +736,10 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff *skb)
739} 736}
740EXPORT_SYMBOL_GPL(nf_conntrack_in); 737EXPORT_SYMBOL_GPL(nf_conntrack_in);
741 738
742int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, 739bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
743 const struct nf_conntrack_tuple *orig) 740 const struct nf_conntrack_tuple *orig)
744{ 741{
745 int ret; 742 bool ret;
746 743
747 rcu_read_lock(); 744 rcu_read_lock();
748 ret = nf_ct_invert_tuple(inverse, orig, 745 ret = nf_ct_invert_tuple(inverse, orig,
@@ -766,10 +763,10 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
766 NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); 763 NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
767 764
768 pr_debug("Altering reply tuple of %p to ", ct); 765 pr_debug("Altering reply tuple of %p to ", ct);
769 NF_CT_DUMP_TUPLE(newreply); 766 nf_ct_dump_tuple(newreply);
770 767
771 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply; 768 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
772 if (ct->master || (help && help->expecting != 0)) 769 if (ct->master || (help && !hlist_empty(&help->expectations)))
773 return; 770 return;
774 771
775 rcu_read_lock(); 772 rcu_read_lock();
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 684ec9c1ad38..e31beeb33b2b 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -54,7 +54,7 @@ void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
54 nf_ct_expect_count--; 54 nf_ct_expect_count--;
55 55
56 hlist_del(&exp->lnode); 56 hlist_del(&exp->lnode);
57 master_help->expecting--; 57 master_help->expecting[exp->class]--;
58 nf_ct_expect_put(exp); 58 nf_ct_expect_put(exp);
59 59
60 NF_CT_STAT_INC(expect_delete); 60 NF_CT_STAT_INC(expect_delete);
@@ -126,9 +126,21 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
126struct nf_conntrack_expect * 126struct nf_conntrack_expect *
127nf_ct_find_expectation(const struct nf_conntrack_tuple *tuple) 127nf_ct_find_expectation(const struct nf_conntrack_tuple *tuple)
128{ 128{
129 struct nf_conntrack_expect *exp; 129 struct nf_conntrack_expect *i, *exp = NULL;
130 struct hlist_node *n;
131 unsigned int h;
132
133 if (!nf_ct_expect_count)
134 return NULL;
130 135
131 exp = __nf_ct_expect_find(tuple); 136 h = nf_ct_expect_dst_hash(tuple);
137 hlist_for_each_entry(i, n, &nf_ct_expect_hash[h], hnode) {
138 if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
139 nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) {
140 exp = i;
141 break;
142 }
143 }
132 if (!exp) 144 if (!exp)
133 return NULL; 145 return NULL;
134 146
@@ -159,7 +171,7 @@ void nf_ct_remove_expectations(struct nf_conn *ct)
159 struct hlist_node *n, *next; 171 struct hlist_node *n, *next;
160 172
161 /* Optimization: most connection never expect any others. */ 173 /* Optimization: most connection never expect any others. */
162 if (!help || help->expecting == 0) 174 if (!help)
163 return; 175 return;
164 176
165 hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) { 177 hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
@@ -193,7 +205,7 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
193static inline int expect_matches(const struct nf_conntrack_expect *a, 205static inline int expect_matches(const struct nf_conntrack_expect *a,
194 const struct nf_conntrack_expect *b) 206 const struct nf_conntrack_expect *b)
195{ 207{
196 return a->master == b->master 208 return a->master == b->master && a->class == b->class
197 && nf_ct_tuple_equal(&a->tuple, &b->tuple) 209 && nf_ct_tuple_equal(&a->tuple, &b->tuple)
198 && nf_ct_tuple_mask_equal(&a->mask, &b->mask); 210 && nf_ct_tuple_mask_equal(&a->mask, &b->mask);
199} 211}
@@ -228,10 +240,11 @@ struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
228} 240}
229EXPORT_SYMBOL_GPL(nf_ct_expect_alloc); 241EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
230 242
231void nf_ct_expect_init(struct nf_conntrack_expect *exp, int family, 243void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
232 union nf_inet_addr *saddr, 244 int family,
233 union nf_inet_addr *daddr, 245 const union nf_inet_addr *saddr,
234 u_int8_t proto, __be16 *src, __be16 *dst) 246 const union nf_inet_addr *daddr,
247 u_int8_t proto, const __be16 *src, const __be16 *dst)
235{ 248{
236 int len; 249 int len;
237 250
@@ -241,6 +254,7 @@ void nf_ct_expect_init(struct nf_conntrack_expect *exp, int family,
241 len = 16; 254 len = 16;
242 255
243 exp->flags = 0; 256 exp->flags = 0;
257 exp->class = class;
244 exp->expectfn = NULL; 258 exp->expectfn = NULL;
245 exp->helper = NULL; 259 exp->helper = NULL;
246 exp->tuple.src.l3num = family; 260 exp->tuple.src.l3num = family;
@@ -297,19 +311,21 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_put);
297static void nf_ct_expect_insert(struct nf_conntrack_expect *exp) 311static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
298{ 312{
299 struct nf_conn_help *master_help = nfct_help(exp->master); 313 struct nf_conn_help *master_help = nfct_help(exp->master);
314 const struct nf_conntrack_expect_policy *p;
300 unsigned int h = nf_ct_expect_dst_hash(&exp->tuple); 315 unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);
301 316
302 atomic_inc(&exp->use); 317 atomic_inc(&exp->use);
303 318
304 hlist_add_head(&exp->lnode, &master_help->expectations); 319 hlist_add_head(&exp->lnode, &master_help->expectations);
305 master_help->expecting++; 320 master_help->expecting[exp->class]++;
306 321
307 hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]); 322 hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
308 nf_ct_expect_count++; 323 nf_ct_expect_count++;
309 324
310 setup_timer(&exp->timeout, nf_ct_expectation_timed_out, 325 setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
311 (unsigned long)exp); 326 (unsigned long)exp);
312 exp->timeout.expires = jiffies + master_help->helper->timeout * HZ; 327 p = &master_help->helper->expect_policy[exp->class];
328 exp->timeout.expires = jiffies + p->timeout * HZ;
313 add_timer(&exp->timeout); 329 add_timer(&exp->timeout);
314 330
315 atomic_inc(&exp->use); 331 atomic_inc(&exp->use);
@@ -317,35 +333,41 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
317} 333}
318 334
319/* Race with expectations being used means we could have none to find; OK. */ 335/* Race with expectations being used means we could have none to find; OK. */
320static void evict_oldest_expect(struct nf_conn *master) 336static void evict_oldest_expect(struct nf_conn *master,
337 struct nf_conntrack_expect *new)
321{ 338{
322 struct nf_conn_help *master_help = nfct_help(master); 339 struct nf_conn_help *master_help = nfct_help(master);
323 struct nf_conntrack_expect *exp = NULL; 340 struct nf_conntrack_expect *exp, *last = NULL;
324 struct hlist_node *n; 341 struct hlist_node *n;
325 342
326 hlist_for_each_entry(exp, n, &master_help->expectations, lnode) 343 hlist_for_each_entry(exp, n, &master_help->expectations, lnode) {
327 ; /* nothing */ 344 if (exp->class == new->class)
345 last = exp;
346 }
328 347
329 if (exp && del_timer(&exp->timeout)) { 348 if (last && del_timer(&last->timeout)) {
330 nf_ct_unlink_expect(exp); 349 nf_ct_unlink_expect(last);
331 nf_ct_expect_put(exp); 350 nf_ct_expect_put(last);
332 } 351 }
333} 352}
334 353
335static inline int refresh_timer(struct nf_conntrack_expect *i) 354static inline int refresh_timer(struct nf_conntrack_expect *i)
336{ 355{
337 struct nf_conn_help *master_help = nfct_help(i->master); 356 struct nf_conn_help *master_help = nfct_help(i->master);
357 const struct nf_conntrack_expect_policy *p;
338 358
339 if (!del_timer(&i->timeout)) 359 if (!del_timer(&i->timeout))
340 return 0; 360 return 0;
341 361
342 i->timeout.expires = jiffies + master_help->helper->timeout*HZ; 362 p = &master_help->helper->expect_policy[i->class];
363 i->timeout.expires = jiffies + p->timeout * HZ;
343 add_timer(&i->timeout); 364 add_timer(&i->timeout);
344 return 1; 365 return 1;
345} 366}
346 367
347int nf_ct_expect_related(struct nf_conntrack_expect *expect) 368int nf_ct_expect_related(struct nf_conntrack_expect *expect)
348{ 369{
370 const struct nf_conntrack_expect_policy *p;
349 struct nf_conntrack_expect *i; 371 struct nf_conntrack_expect *i;
350 struct nf_conn *master = expect->master; 372 struct nf_conn *master = expect->master;
351 struct nf_conn_help *master_help = nfct_help(master); 373 struct nf_conn_help *master_help = nfct_help(master);
@@ -374,9 +396,15 @@ int nf_ct_expect_related(struct nf_conntrack_expect *expect)
374 } 396 }
375 } 397 }
376 /* Will be over limit? */ 398 /* Will be over limit? */
377 if (master_help->helper->max_expected && 399 p = &master_help->helper->expect_policy[expect->class];
378 master_help->expecting >= master_help->helper->max_expected) 400 if (p->max_expected &&
379 evict_oldest_expect(master); 401 master_help->expecting[expect->class] >= p->max_expected) {
402 evict_oldest_expect(master, expect);
403 if (master_help->expecting[expect->class] >= p->max_expected) {
404 ret = -EMFILE;
405 goto out;
406 }
407 }
380 408
381 if (nf_ct_expect_count >= nf_ct_expect_max) { 409 if (nf_ct_expect_count >= nf_ct_expect_max) {
382 if (net_ratelimit()) 410 if (net_ratelimit())
@@ -460,6 +488,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
460{ 488{
461 struct nf_conntrack_expect *expect; 489 struct nf_conntrack_expect *expect;
462 struct hlist_node *n = v; 490 struct hlist_node *n = v;
491 char *delim = "";
463 492
464 expect = hlist_entry(n, struct nf_conntrack_expect, hnode); 493 expect = hlist_entry(n, struct nf_conntrack_expect, hnode);
465 494
@@ -475,6 +504,14 @@ static int exp_seq_show(struct seq_file *s, void *v)
475 __nf_ct_l3proto_find(expect->tuple.src.l3num), 504 __nf_ct_l3proto_find(expect->tuple.src.l3num),
476 __nf_ct_l4proto_find(expect->tuple.src.l3num, 505 __nf_ct_l4proto_find(expect->tuple.src.l3num,
477 expect->tuple.dst.protonum)); 506 expect->tuple.dst.protonum));
507
508 if (expect->flags & NF_CT_EXPECT_PERMANENT) {
509 seq_printf(s, "PERMANENT");
510 delim = ",";
511 }
512 if (expect->flags & NF_CT_EXPECT_INACTIVE)
513 seq_printf(s, "%sINACTIVE", delim);
514
478 return seq_putc(s, '\n'); 515 return seq_putc(s, '\n');
479} 516}
480 517
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 2bd9963b5b3e..bcc19fa4ed1e 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -71,6 +71,9 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
71 int i, newlen, newoff; 71 int i, newlen, newoff;
72 struct nf_ct_ext_type *t; 72 struct nf_ct_ext_type *t;
73 73
74 /* Conntrack must not be confirmed to avoid races on reallocation. */
75 NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
76
74 if (!ct->ext) 77 if (!ct->ext)
75 return nf_ct_ext_create(&ct->ext, id, gfp); 78 return nf_ct_ext_create(&ct->ext, id, gfp);
76 79
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 6770baf2e845..bb20672fe036 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -350,8 +350,9 @@ static int help(struct sk_buff *skb,
350 enum ip_conntrack_info ctinfo) 350 enum ip_conntrack_info ctinfo)
351{ 351{
352 unsigned int dataoff, datalen; 352 unsigned int dataoff, datalen;
353 struct tcphdr _tcph, *th; 353 const struct tcphdr *th;
354 char *fb_ptr; 354 struct tcphdr _tcph;
355 const char *fb_ptr;
355 int ret; 356 int ret;
356 u32 seq; 357 u32 seq;
357 int dir = CTINFO2DIR(ctinfo); 358 int dir = CTINFO2DIR(ctinfo);
@@ -405,7 +406,7 @@ static int help(struct sk_buff *skb,
405 406
406 /* Initialize IP/IPv6 addr to expected address (it's not mentioned 407 /* Initialize IP/IPv6 addr to expected address (it's not mentioned
407 in EPSV responses) */ 408 in EPSV responses) */
408 cmd.l3num = ct->tuplehash[dir].tuple.src.l3num; 409 cmd.l3num = nf_ct_l3num(ct);
409 memcpy(cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all, 410 memcpy(cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
410 sizeof(cmd.u3.all)); 411 sizeof(cmd.u3.all));
411 412
@@ -452,7 +453,7 @@ static int help(struct sk_buff *skb,
452 daddr = &ct->tuplehash[!dir].tuple.dst.u3; 453 daddr = &ct->tuplehash[!dir].tuple.dst.u3;
453 454
454 /* Update the ftp info */ 455 /* Update the ftp info */
455 if ((cmd.l3num == ct->tuplehash[dir].tuple.src.l3num) && 456 if ((cmd.l3num == nf_ct_l3num(ct)) &&
456 memcmp(&cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all, 457 memcmp(&cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
457 sizeof(cmd.u3.all))) { 458 sizeof(cmd.u3.all))) {
458 /* Enrico Scholz's passive FTP to partially RNAT'd ftp 459 /* Enrico Scholz's passive FTP to partially RNAT'd ftp
@@ -483,7 +484,7 @@ static int help(struct sk_buff *skb,
483 daddr = &cmd.u3; 484 daddr = &cmd.u3;
484 } 485 }
485 486
486 nf_ct_expect_init(exp, cmd.l3num, 487 nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, cmd.l3num,
487 &ct->tuplehash[!dir].tuple.src.u3, daddr, 488 &ct->tuplehash[!dir].tuple.src.u3, daddr,
488 IPPROTO_TCP, NULL, &cmd.u.tcp.port); 489 IPPROTO_TCP, NULL, &cmd.u.tcp.port);
489 490
@@ -517,6 +518,11 @@ out_update_nl:
517static struct nf_conntrack_helper ftp[MAX_PORTS][2] __read_mostly; 518static struct nf_conntrack_helper ftp[MAX_PORTS][2] __read_mostly;
518static char ftp_names[MAX_PORTS][2][sizeof("ftp-65535")] __read_mostly; 519static char ftp_names[MAX_PORTS][2][sizeof("ftp-65535")] __read_mostly;
519 520
521static const struct nf_conntrack_expect_policy ftp_exp_policy = {
522 .max_expected = 1,
523 .timeout = 5 * 60,
524};
525
520/* don't make this __exit, since it's called from __init ! */ 526/* don't make this __exit, since it's called from __init ! */
521static void nf_conntrack_ftp_fini(void) 527static void nf_conntrack_ftp_fini(void)
522{ 528{
@@ -556,8 +562,7 @@ static int __init nf_conntrack_ftp_init(void)
556 for (j = 0; j < 2; j++) { 562 for (j = 0; j < 2; j++) {
557 ftp[i][j].tuple.src.u.tcp.port = htons(ports[i]); 563 ftp[i][j].tuple.src.u.tcp.port = htons(ports[i]);
558 ftp[i][j].tuple.dst.protonum = IPPROTO_TCP; 564 ftp[i][j].tuple.dst.protonum = IPPROTO_TCP;
559 ftp[i][j].max_expected = 1; 565 ftp[i][j].expect_policy = &ftp_exp_policy;
560 ftp[i][j].timeout = 5 * 60; /* 5 Minutes */
561 ftp[i][j].me = THIS_MODULE; 566 ftp[i][j].me = THIS_MODULE;
562 ftp[i][j].help = help; 567 ftp[i][j].help = help;
563 tmpname = &ftp_names[i][j][0]; 568 tmpname = &ftp_names[i][j][0];
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 898f1922b5b8..95da1a24aab7 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -218,7 +218,6 @@ static int get_h245_addr(struct nf_conn *ct, const unsigned char *data,
218 union nf_inet_addr *addr, __be16 *port) 218 union nf_inet_addr *addr, __be16 *port)
219{ 219{
220 const unsigned char *p; 220 const unsigned char *p;
221 int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
222 int len; 221 int len;
223 222
224 if (taddr->choice != eH245_TransportAddress_unicastAddress) 223 if (taddr->choice != eH245_TransportAddress_unicastAddress)
@@ -226,13 +225,13 @@ static int get_h245_addr(struct nf_conn *ct, const unsigned char *data,
226 225
227 switch (taddr->unicastAddress.choice) { 226 switch (taddr->unicastAddress.choice) {
228 case eUnicastAddress_iPAddress: 227 case eUnicastAddress_iPAddress:
229 if (family != AF_INET) 228 if (nf_ct_l3num(ct) != AF_INET)
230 return 0; 229 return 0;
231 p = data + taddr->unicastAddress.iPAddress.network; 230 p = data + taddr->unicastAddress.iPAddress.network;
232 len = 4; 231 len = 4;
233 break; 232 break;
234 case eUnicastAddress_iP6Address: 233 case eUnicastAddress_iP6Address:
235 if (family != AF_INET6) 234 if (nf_ct_l3num(ct) != AF_INET6)
236 return 0; 235 return 0;
237 p = data + taddr->unicastAddress.iP6Address.network; 236 p = data + taddr->unicastAddress.iP6Address.network;
238 len = 16; 237 len = 16;
@@ -277,7 +276,7 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
277 /* Create expect for RTP */ 276 /* Create expect for RTP */
278 if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL) 277 if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL)
279 return -1; 278 return -1;
280 nf_ct_expect_init(rtp_exp, ct->tuplehash[!dir].tuple.src.l3num, 279 nf_ct_expect_init(rtp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
281 &ct->tuplehash[!dir].tuple.src.u3, 280 &ct->tuplehash[!dir].tuple.src.u3,
282 &ct->tuplehash[!dir].tuple.dst.u3, 281 &ct->tuplehash[!dir].tuple.dst.u3,
283 IPPROTO_UDP, NULL, &rtp_port); 282 IPPROTO_UDP, NULL, &rtp_port);
@@ -287,7 +286,7 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
287 nf_ct_expect_put(rtp_exp); 286 nf_ct_expect_put(rtp_exp);
288 return -1; 287 return -1;
289 } 288 }
290 nf_ct_expect_init(rtcp_exp, ct->tuplehash[!dir].tuple.src.l3num, 289 nf_ct_expect_init(rtcp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
291 &ct->tuplehash[!dir].tuple.src.u3, 290 &ct->tuplehash[!dir].tuple.src.u3,
292 &ct->tuplehash[!dir].tuple.dst.u3, 291 &ct->tuplehash[!dir].tuple.dst.u3,
293 IPPROTO_UDP, NULL, &rtcp_port); 292 IPPROTO_UDP, NULL, &rtcp_port);
@@ -304,9 +303,9 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
304 if (nf_ct_expect_related(rtp_exp) == 0) { 303 if (nf_ct_expect_related(rtp_exp) == 0) {
305 if (nf_ct_expect_related(rtcp_exp) == 0) { 304 if (nf_ct_expect_related(rtcp_exp) == 0) {
306 pr_debug("nf_ct_h323: expect RTP "); 305 pr_debug("nf_ct_h323: expect RTP ");
307 NF_CT_DUMP_TUPLE(&rtp_exp->tuple); 306 nf_ct_dump_tuple(&rtp_exp->tuple);
308 pr_debug("nf_ct_h323: expect RTCP "); 307 pr_debug("nf_ct_h323: expect RTCP ");
309 NF_CT_DUMP_TUPLE(&rtcp_exp->tuple); 308 nf_ct_dump_tuple(&rtcp_exp->tuple);
310 } else { 309 } else {
311 nf_ct_unexpect_related(rtp_exp); 310 nf_ct_unexpect_related(rtp_exp);
312 ret = -1; 311 ret = -1;
@@ -344,7 +343,7 @@ static int expect_t120(struct sk_buff *skb,
344 /* Create expect for T.120 connections */ 343 /* Create expect for T.120 connections */
345 if ((exp = nf_ct_expect_alloc(ct)) == NULL) 344 if ((exp = nf_ct_expect_alloc(ct)) == NULL)
346 return -1; 345 return -1;
347 nf_ct_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, 346 nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
348 &ct->tuplehash[!dir].tuple.src.u3, 347 &ct->tuplehash[!dir].tuple.src.u3,
349 &ct->tuplehash[!dir].tuple.dst.u3, 348 &ct->tuplehash[!dir].tuple.dst.u3,
350 IPPROTO_TCP, NULL, &port); 349 IPPROTO_TCP, NULL, &port);
@@ -361,7 +360,7 @@ static int expect_t120(struct sk_buff *skb,
361 } else { /* Conntrack only */ 360 } else { /* Conntrack only */
362 if (nf_ct_expect_related(exp) == 0) { 361 if (nf_ct_expect_related(exp) == 0) {
363 pr_debug("nf_ct_h323: expect T.120 "); 362 pr_debug("nf_ct_h323: expect T.120 ");
364 NF_CT_DUMP_TUPLE(&exp->tuple); 363 nf_ct_dump_tuple(&exp->tuple);
365 } else 364 } else
366 ret = -1; 365 ret = -1;
367 } 366 }
@@ -583,7 +582,7 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
583 while (get_tpkt_data(skb, protoff, ct, ctinfo, 582 while (get_tpkt_data(skb, protoff, ct, ctinfo,
584 &data, &datalen, &dataoff)) { 583 &data, &datalen, &dataoff)) {
585 pr_debug("nf_ct_h245: TPKT len=%d ", datalen); 584 pr_debug("nf_ct_h245: TPKT len=%d ", datalen);
586 NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple); 585 nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
587 586
588 /* Decode H.245 signal */ 587 /* Decode H.245 signal */
589 ret = DecodeMultimediaSystemControlMessage(data, datalen, 588 ret = DecodeMultimediaSystemControlMessage(data, datalen,
@@ -612,13 +611,17 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
612} 611}
613 612
614/****************************************************************************/ 613/****************************************************************************/
614static const struct nf_conntrack_expect_policy h245_exp_policy = {
615 .max_expected = H323_RTP_CHANNEL_MAX * 4 + 2 /* T.120 */,
616 .timeout = 240,
617};
618
615static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = { 619static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = {
616 .name = "H.245", 620 .name = "H.245",
617 .me = THIS_MODULE, 621 .me = THIS_MODULE,
618 .max_expected = H323_RTP_CHANNEL_MAX * 4 + 2 /* T.120 */,
619 .timeout = 240,
620 .tuple.dst.protonum = IPPROTO_UDP, 622 .tuple.dst.protonum = IPPROTO_UDP,
621 .help = h245_help 623 .help = h245_help,
624 .expect_policy = &h245_exp_policy,
622}; 625};
623 626
624/****************************************************************************/ 627/****************************************************************************/
@@ -627,18 +630,17 @@ int get_h225_addr(struct nf_conn *ct, unsigned char *data,
627 union nf_inet_addr *addr, __be16 *port) 630 union nf_inet_addr *addr, __be16 *port)
628{ 631{
629 const unsigned char *p; 632 const unsigned char *p;
630 int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
631 int len; 633 int len;
632 634
633 switch (taddr->choice) { 635 switch (taddr->choice) {
634 case eTransportAddress_ipAddress: 636 case eTransportAddress_ipAddress:
635 if (family != AF_INET) 637 if (nf_ct_l3num(ct) != AF_INET)
636 return 0; 638 return 0;
637 p = data + taddr->ipAddress.ip; 639 p = data + taddr->ipAddress.ip;
638 len = 4; 640 len = 4;
639 break; 641 break;
640 case eTransportAddress_ip6Address: 642 case eTransportAddress_ip6Address:
641 if (family != AF_INET6) 643 if (nf_ct_l3num(ct) != AF_INET6)
642 return 0; 644 return 0;
643 p = data + taddr->ip6Address.ip; 645 p = data + taddr->ip6Address.ip;
644 len = 16; 646 len = 16;
@@ -676,7 +678,7 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
676 /* Create expect for h245 connection */ 678 /* Create expect for h245 connection */
677 if ((exp = nf_ct_expect_alloc(ct)) == NULL) 679 if ((exp = nf_ct_expect_alloc(ct)) == NULL)
678 return -1; 680 return -1;
679 nf_ct_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, 681 nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
680 &ct->tuplehash[!dir].tuple.src.u3, 682 &ct->tuplehash[!dir].tuple.src.u3,
681 &ct->tuplehash[!dir].tuple.dst.u3, 683 &ct->tuplehash[!dir].tuple.dst.u3,
682 IPPROTO_TCP, NULL, &port); 684 IPPROTO_TCP, NULL, &port);
@@ -693,7 +695,7 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
693 } else { /* Conntrack only */ 695 } else { /* Conntrack only */
694 if (nf_ct_expect_related(exp) == 0) { 696 if (nf_ct_expect_related(exp) == 0) {
695 pr_debug("nf_ct_q931: expect H.245 "); 697 pr_debug("nf_ct_q931: expect H.245 ");
696 NF_CT_DUMP_TUPLE(&exp->tuple); 698 nf_ct_dump_tuple(&exp->tuple);
697 } else 699 } else
698 ret = -1; 700 ret = -1;
699 } 701 }
@@ -784,7 +786,7 @@ static int expect_callforwarding(struct sk_buff *skb,
784 * we don't need to track the second call */ 786 * we don't need to track the second call */
785 if (callforward_filter && 787 if (callforward_filter &&
786 callforward_do_filter(&addr, &ct->tuplehash[!dir].tuple.src.u3, 788 callforward_do_filter(&addr, &ct->tuplehash[!dir].tuple.src.u3,
787 ct->tuplehash[!dir].tuple.src.l3num)) { 789 nf_ct_l3num(ct))) {
788 pr_debug("nf_ct_q931: Call Forwarding not tracked\n"); 790 pr_debug("nf_ct_q931: Call Forwarding not tracked\n");
789 return 0; 791 return 0;
790 } 792 }
@@ -792,7 +794,7 @@ static int expect_callforwarding(struct sk_buff *skb,
792 /* Create expect for the second call leg */ 794 /* Create expect for the second call leg */
793 if ((exp = nf_ct_expect_alloc(ct)) == NULL) 795 if ((exp = nf_ct_expect_alloc(ct)) == NULL)
794 return -1; 796 return -1;
795 nf_ct_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, 797 nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
796 &ct->tuplehash[!dir].tuple.src.u3, &addr, 798 &ct->tuplehash[!dir].tuple.src.u3, &addr,
797 IPPROTO_TCP, NULL, &port); 799 IPPROTO_TCP, NULL, &port);
798 exp->helper = nf_conntrack_helper_q931; 800 exp->helper = nf_conntrack_helper_q931;
@@ -808,7 +810,7 @@ static int expect_callforwarding(struct sk_buff *skb,
808 } else { /* Conntrack only */ 810 } else { /* Conntrack only */
809 if (nf_ct_expect_related(exp) == 0) { 811 if (nf_ct_expect_related(exp) == 0) {
810 pr_debug("nf_ct_q931: expect Call Forwarding "); 812 pr_debug("nf_ct_q931: expect Call Forwarding ");
811 NF_CT_DUMP_TUPLE(&exp->tuple); 813 nf_ct_dump_tuple(&exp->tuple);
812 } else 814 } else
813 ret = -1; 815 ret = -1;
814 } 816 }
@@ -1128,7 +1130,7 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
1128 while (get_tpkt_data(skb, protoff, ct, ctinfo, 1130 while (get_tpkt_data(skb, protoff, ct, ctinfo,
1129 &data, &datalen, &dataoff)) { 1131 &data, &datalen, &dataoff)) {
1130 pr_debug("nf_ct_q931: TPKT len=%d ", datalen); 1132 pr_debug("nf_ct_q931: TPKT len=%d ", datalen);
1131 NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple); 1133 nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
1132 1134
1133 /* Decode Q.931 signal */ 1135 /* Decode Q.931 signal */
1134 ret = DecodeQ931(data, datalen, &q931); 1136 ret = DecodeQ931(data, datalen, &q931);
@@ -1156,28 +1158,30 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
1156} 1158}
1157 1159
1158/****************************************************************************/ 1160/****************************************************************************/
1161static const struct nf_conntrack_expect_policy q931_exp_policy = {
1162 /* T.120 and H.245 */
1163 .max_expected = H323_RTP_CHANNEL_MAX * 4 + 4,
1164 .timeout = 240,
1165};
1166
1159static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = { 1167static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = {
1160 { 1168 {
1161 .name = "Q.931", 1169 .name = "Q.931",
1162 .me = THIS_MODULE, 1170 .me = THIS_MODULE,
1163 /* T.120 and H.245 */
1164 .max_expected = H323_RTP_CHANNEL_MAX * 4 + 4,
1165 .timeout = 240,
1166 .tuple.src.l3num = AF_INET, 1171 .tuple.src.l3num = AF_INET,
1167 .tuple.src.u.tcp.port = __constant_htons(Q931_PORT), 1172 .tuple.src.u.tcp.port = __constant_htons(Q931_PORT),
1168 .tuple.dst.protonum = IPPROTO_TCP, 1173 .tuple.dst.protonum = IPPROTO_TCP,
1169 .help = q931_help 1174 .help = q931_help,
1175 .expect_policy = &q931_exp_policy,
1170 }, 1176 },
1171 { 1177 {
1172 .name = "Q.931", 1178 .name = "Q.931",
1173 .me = THIS_MODULE, 1179 .me = THIS_MODULE,
1174 /* T.120 and H.245 */
1175 .max_expected = H323_RTP_CHANNEL_MAX * 4 + 4,
1176 .timeout = 240,
1177 .tuple.src.l3num = AF_INET6, 1180 .tuple.src.l3num = AF_INET6,
1178 .tuple.src.u.tcp.port = __constant_htons(Q931_PORT), 1181 .tuple.src.u.tcp.port = __constant_htons(Q931_PORT),
1179 .tuple.dst.protonum = IPPROTO_TCP, 1182 .tuple.dst.protonum = IPPROTO_TCP,
1180 .help = q931_help 1183 .help = q931_help,
1184 .expect_policy = &q931_exp_policy,
1181 }, 1185 },
1182}; 1186};
1183 1187
@@ -1261,7 +1265,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
1261 /* Create expect for Q.931 */ 1265 /* Create expect for Q.931 */
1262 if ((exp = nf_ct_expect_alloc(ct)) == NULL) 1266 if ((exp = nf_ct_expect_alloc(ct)) == NULL)
1263 return -1; 1267 return -1;
1264 nf_ct_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, 1268 nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
1265 gkrouted_only ? /* only accept calls from GK? */ 1269 gkrouted_only ? /* only accept calls from GK? */
1266 &ct->tuplehash[!dir].tuple.src.u3 : NULL, 1270 &ct->tuplehash[!dir].tuple.src.u3 : NULL,
1267 &ct->tuplehash[!dir].tuple.dst.u3, 1271 &ct->tuplehash[!dir].tuple.dst.u3,
@@ -1275,7 +1279,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
1275 } else { /* Conntrack only */ 1279 } else { /* Conntrack only */
1276 if (nf_ct_expect_related(exp) == 0) { 1280 if (nf_ct_expect_related(exp) == 0) {
1277 pr_debug("nf_ct_ras: expect Q.931 "); 1281 pr_debug("nf_ct_ras: expect Q.931 ");
1278 NF_CT_DUMP_TUPLE(&exp->tuple); 1282 nf_ct_dump_tuple(&exp->tuple);
1279 1283
1280 /* Save port for looking up expect in processing RCF */ 1284 /* Save port for looking up expect in processing RCF */
1281 info->sig_port[dir] = port; 1285 info->sig_port[dir] = port;
@@ -1332,14 +1336,14 @@ static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
1332 /* Need new expect */ 1336 /* Need new expect */
1333 if ((exp = nf_ct_expect_alloc(ct)) == NULL) 1337 if ((exp = nf_ct_expect_alloc(ct)) == NULL)
1334 return -1; 1338 return -1;
1335 nf_ct_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, 1339 nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
1336 &ct->tuplehash[!dir].tuple.src.u3, &addr, 1340 &ct->tuplehash[!dir].tuple.src.u3, &addr,
1337 IPPROTO_UDP, NULL, &port); 1341 IPPROTO_UDP, NULL, &port);
1338 exp->helper = nf_conntrack_helper_ras; 1342 exp->helper = nf_conntrack_helper_ras;
1339 1343
1340 if (nf_ct_expect_related(exp) == 0) { 1344 if (nf_ct_expect_related(exp) == 0) {
1341 pr_debug("nf_ct_ras: expect RAS "); 1345 pr_debug("nf_ct_ras: expect RAS ");
1342 NF_CT_DUMP_TUPLE(&exp->tuple); 1346 nf_ct_dump_tuple(&exp->tuple);
1343 } else 1347 } else
1344 ret = -1; 1348 ret = -1;
1345 1349
@@ -1423,7 +1427,7 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
1423 pr_debug("nf_ct_ras: set Q.931 expect " 1427 pr_debug("nf_ct_ras: set Q.931 expect "
1424 "timeout to %u seconds for", 1428 "timeout to %u seconds for",
1425 info->timeout); 1429 info->timeout);
1426 NF_CT_DUMP_TUPLE(&exp->tuple); 1430 nf_ct_dump_tuple(&exp->tuple);
1427 set_expect_timeout(exp, info->timeout); 1431 set_expect_timeout(exp, info->timeout);
1428 } 1432 }
1429 spin_unlock_bh(&nf_conntrack_lock); 1433 spin_unlock_bh(&nf_conntrack_lock);
@@ -1536,7 +1540,7 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
1536 /* Need new expect */ 1540 /* Need new expect */
1537 if ((exp = nf_ct_expect_alloc(ct)) == NULL) 1541 if ((exp = nf_ct_expect_alloc(ct)) == NULL)
1538 return -1; 1542 return -1;
1539 nf_ct_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, 1543 nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
1540 &ct->tuplehash[!dir].tuple.src.u3, &addr, 1544 &ct->tuplehash[!dir].tuple.src.u3, &addr,
1541 IPPROTO_TCP, NULL, &port); 1545 IPPROTO_TCP, NULL, &port);
1542 exp->flags = NF_CT_EXPECT_PERMANENT; 1546 exp->flags = NF_CT_EXPECT_PERMANENT;
@@ -1544,7 +1548,7 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
1544 1548
1545 if (nf_ct_expect_related(exp) == 0) { 1549 if (nf_ct_expect_related(exp) == 0) {
1546 pr_debug("nf_ct_ras: expect Q.931 "); 1550 pr_debug("nf_ct_ras: expect Q.931 ");
1547 NF_CT_DUMP_TUPLE(&exp->tuple); 1551 nf_ct_dump_tuple(&exp->tuple);
1548 } else 1552 } else
1549 ret = -1; 1553 ret = -1;
1550 1554
@@ -1589,7 +1593,7 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
1589 /* Need new expect for call signal */ 1593 /* Need new expect for call signal */
1590 if ((exp = nf_ct_expect_alloc(ct)) == NULL) 1594 if ((exp = nf_ct_expect_alloc(ct)) == NULL)
1591 return -1; 1595 return -1;
1592 nf_ct_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, 1596 nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
1593 &ct->tuplehash[!dir].tuple.src.u3, &addr, 1597 &ct->tuplehash[!dir].tuple.src.u3, &addr,
1594 IPPROTO_TCP, NULL, &port); 1598 IPPROTO_TCP, NULL, &port);
1595 exp->flags = NF_CT_EXPECT_PERMANENT; 1599 exp->flags = NF_CT_EXPECT_PERMANENT;
@@ -1597,7 +1601,7 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
1597 1601
1598 if (nf_ct_expect_related(exp) == 0) { 1602 if (nf_ct_expect_related(exp) == 0) {
1599 pr_debug("nf_ct_ras: expect Q.931 "); 1603 pr_debug("nf_ct_ras: expect Q.931 ");
1600 NF_CT_DUMP_TUPLE(&exp->tuple); 1604 nf_ct_dump_tuple(&exp->tuple);
1601 } else 1605 } else
1602 ret = -1; 1606 ret = -1;
1603 1607
@@ -1701,7 +1705,7 @@ static int ras_help(struct sk_buff *skb, unsigned int protoff,
1701 if (data == NULL) 1705 if (data == NULL)
1702 goto accept; 1706 goto accept;
1703 pr_debug("nf_ct_ras: RAS message len=%d ", datalen); 1707 pr_debug("nf_ct_ras: RAS message len=%d ", datalen);
1704 NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple); 1708 nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
1705 1709
1706 /* Decode RAS message */ 1710 /* Decode RAS message */
1707 ret = DecodeRasMessage(data, datalen, &ras); 1711 ret = DecodeRasMessage(data, datalen, &ras);
@@ -1728,26 +1732,29 @@ static int ras_help(struct sk_buff *skb, unsigned int protoff,
1728} 1732}
1729 1733
1730/****************************************************************************/ 1734/****************************************************************************/
1735static const struct nf_conntrack_expect_policy ras_exp_policy = {
1736 .max_expected = 32,
1737 .timeout = 240,
1738};
1739
1731static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = { 1740static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
1732 { 1741 {
1733 .name = "RAS", 1742 .name = "RAS",
1734 .me = THIS_MODULE, 1743 .me = THIS_MODULE,
1735 .max_expected = 32,
1736 .timeout = 240,
1737 .tuple.src.l3num = AF_INET, 1744 .tuple.src.l3num = AF_INET,
1738 .tuple.src.u.udp.port = __constant_htons(RAS_PORT), 1745 .tuple.src.u.udp.port = __constant_htons(RAS_PORT),
1739 .tuple.dst.protonum = IPPROTO_UDP, 1746 .tuple.dst.protonum = IPPROTO_UDP,
1740 .help = ras_help, 1747 .help = ras_help,
1748 .expect_policy = &ras_exp_policy,
1741 }, 1749 },
1742 { 1750 {
1743 .name = "RAS", 1751 .name = "RAS",
1744 .me = THIS_MODULE, 1752 .me = THIS_MODULE,
1745 .max_expected = 32,
1746 .timeout = 240,
1747 .tuple.src.l3num = AF_INET6, 1753 .tuple.src.l3num = AF_INET6,
1748 .tuple.src.u.udp.port = __constant_htons(RAS_PORT), 1754 .tuple.src.u.udp.port = __constant_htons(RAS_PORT),
1749 .tuple.dst.protonum = IPPROTO_UDP, 1755 .tuple.dst.protonum = IPPROTO_UDP,
1750 .help = ras_help, 1756 .help = ras_help,
1757 .expect_policy = &ras_exp_policy,
1751 }, 1758 },
1752}; 1759};
1753 1760
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index b1fd21cc1dbc..7d1b11703741 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -110,7 +110,8 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
110{ 110{
111 unsigned int h = helper_hash(&me->tuple); 111 unsigned int h = helper_hash(&me->tuple);
112 112
113 BUG_ON(me->timeout == 0); 113 BUG_ON(me->expect_policy == NULL);
114 BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
114 115
115 mutex_lock(&nf_ct_helper_mutex); 116 mutex_lock(&nf_ct_helper_mutex);
116 hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]); 117 hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]);
@@ -125,7 +126,7 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
125{ 126{
126 struct nf_conntrack_tuple_hash *h; 127 struct nf_conntrack_tuple_hash *h;
127 struct nf_conntrack_expect *exp; 128 struct nf_conntrack_expect *exp;
128 struct hlist_node *n, *next; 129 const struct hlist_node *n, *next;
129 unsigned int i; 130 unsigned int i;
130 131
131 mutex_lock(&nf_ct_helper_mutex); 132 mutex_lock(&nf_ct_helper_mutex);
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index c336b07a0d4c..1b1226d6653f 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -50,7 +50,7 @@ MODULE_PARM_DESC(max_dcc_channels, "max number of expected DCC channels per "
50module_param(dcc_timeout, uint, 0400); 50module_param(dcc_timeout, uint, 0400);
51MODULE_PARM_DESC(dcc_timeout, "timeout on for unestablished DCC channels"); 51MODULE_PARM_DESC(dcc_timeout, "timeout on for unestablished DCC channels");
52 52
53static const char *dccprotos[] = { 53static const char *const dccprotos[] = {
54 "SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT " 54 "SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT "
55}; 55};
56 56
@@ -65,7 +65,7 @@ static const char *dccprotos[] = {
65 * ad_beg_p returns pointer to first byte of addr data 65 * ad_beg_p returns pointer to first byte of addr data
66 * ad_end_p returns pointer to last byte of addr data 66 * ad_end_p returns pointer to last byte of addr data
67 */ 67 */
68static int parse_dcc(char *data, char *data_end, u_int32_t *ip, 68static int parse_dcc(char *data, const char *data_end, u_int32_t *ip,
69 u_int16_t *port, char **ad_beg_p, char **ad_end_p) 69 u_int16_t *port, char **ad_beg_p, char **ad_end_p)
70{ 70{
71 /* at least 12: "AAAAAAAA P\1\n" */ 71 /* at least 12: "AAAAAAAA P\1\n" */
@@ -93,9 +93,11 @@ static int help(struct sk_buff *skb, unsigned int protoff,
93 struct nf_conn *ct, enum ip_conntrack_info ctinfo) 93 struct nf_conn *ct, enum ip_conntrack_info ctinfo)
94{ 94{
95 unsigned int dataoff; 95 unsigned int dataoff;
96 struct iphdr *iph; 96 const struct iphdr *iph;
97 struct tcphdr _tcph, *th; 97 const struct tcphdr *th;
98 char *data, *data_limit, *ib_ptr; 98 struct tcphdr _tcph;
99 const char *data_limit;
100 char *data, *ib_ptr;
99 int dir = CTINFO2DIR(ctinfo); 101 int dir = CTINFO2DIR(ctinfo);
100 struct nf_conntrack_expect *exp; 102 struct nf_conntrack_expect *exp;
101 struct nf_conntrack_tuple *tuple; 103 struct nf_conntrack_tuple *tuple;
@@ -159,7 +161,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
159 /* we have at least 161 /* we have at least
160 * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid 162 * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
161 * data left (== 14/13 bytes) */ 163 * data left (== 14/13 bytes) */
162 if (parse_dcc((char *)data, data_limit, &dcc_ip, 164 if (parse_dcc(data, data_limit, &dcc_ip,
163 &dcc_port, &addr_beg_p, &addr_end_p)) { 165 &dcc_port, &addr_beg_p, &addr_end_p)) {
164 pr_debug("unable to parse dcc command\n"); 166 pr_debug("unable to parse dcc command\n");
165 continue; 167 continue;
@@ -187,7 +189,8 @@ static int help(struct sk_buff *skb, unsigned int protoff,
187 } 189 }
188 tuple = &ct->tuplehash[!dir].tuple; 190 tuple = &ct->tuplehash[!dir].tuple;
189 port = htons(dcc_port); 191 port = htons(dcc_port);
190 nf_ct_expect_init(exp, tuple->src.l3num, 192 nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
193 tuple->src.l3num,
191 NULL, &tuple->dst.u3, 194 NULL, &tuple->dst.u3,
192 IPPROTO_TCP, NULL, &port); 195 IPPROTO_TCP, NULL, &port);
193 196
@@ -210,6 +213,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
210 213
211static struct nf_conntrack_helper irc[MAX_PORTS] __read_mostly; 214static struct nf_conntrack_helper irc[MAX_PORTS] __read_mostly;
212static char irc_names[MAX_PORTS][sizeof("irc-65535")] __read_mostly; 215static char irc_names[MAX_PORTS][sizeof("irc-65535")] __read_mostly;
216static struct nf_conntrack_expect_policy irc_exp_policy;
213 217
214static void nf_conntrack_irc_fini(void); 218static void nf_conntrack_irc_fini(void);
215 219
@@ -223,6 +227,9 @@ static int __init nf_conntrack_irc_init(void)
223 return -EINVAL; 227 return -EINVAL;
224 } 228 }
225 229
230 irc_exp_policy.max_expected = max_dcc_channels;
231 irc_exp_policy.timeout = dcc_timeout;
232
226 irc_buffer = kmalloc(65536, GFP_KERNEL); 233 irc_buffer = kmalloc(65536, GFP_KERNEL);
227 if (!irc_buffer) 234 if (!irc_buffer)
228 return -ENOMEM; 235 return -ENOMEM;
@@ -235,8 +242,7 @@ static int __init nf_conntrack_irc_init(void)
235 irc[i].tuple.src.l3num = AF_INET; 242 irc[i].tuple.src.l3num = AF_INET;
236 irc[i].tuple.src.u.tcp.port = htons(ports[i]); 243 irc[i].tuple.src.u.tcp.port = htons(ports[i]);
237 irc[i].tuple.dst.protonum = IPPROTO_TCP; 244 irc[i].tuple.dst.protonum = IPPROTO_TCP;
238 irc[i].max_expected = max_dcc_channels; 245 irc[i].expect_policy = &irc_exp_policy;
239 irc[i].timeout = dcc_timeout;
240 irc[i].me = THIS_MODULE; 246 irc[i].me = THIS_MODULE;
241 irc[i].help = help; 247 irc[i].help = help;
242 248
diff --git a/net/netfilter/nf_conntrack_l3proto_generic.c b/net/netfilter/nf_conntrack_l3proto_generic.c
index 8e914e5ffea8..e7eb807fe07d 100644
--- a/net/netfilter/nf_conntrack_l3proto_generic.c
+++ b/net/netfilter/nf_conntrack_l3proto_generic.c
@@ -31,22 +31,22 @@
31#include <net/netfilter/nf_conntrack_core.h> 31#include <net/netfilter/nf_conntrack_core.h>
32#include <net/netfilter/ipv4/nf_conntrack_ipv4.h> 32#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
33 33
34static int generic_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, 34static bool generic_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
35 struct nf_conntrack_tuple *tuple) 35 struct nf_conntrack_tuple *tuple)
36{ 36{
37 memset(&tuple->src.u3, 0, sizeof(tuple->src.u3)); 37 memset(&tuple->src.u3, 0, sizeof(tuple->src.u3));
38 memset(&tuple->dst.u3, 0, sizeof(tuple->dst.u3)); 38 memset(&tuple->dst.u3, 0, sizeof(tuple->dst.u3));
39 39
40 return 1; 40 return true;
41} 41}
42 42
43static int generic_invert_tuple(struct nf_conntrack_tuple *tuple, 43static bool generic_invert_tuple(struct nf_conntrack_tuple *tuple,
44 const struct nf_conntrack_tuple *orig) 44 const struct nf_conntrack_tuple *orig)
45{ 45{
46 memset(&tuple->src.u3, 0, sizeof(tuple->src.u3)); 46 memset(&tuple->src.u3, 0, sizeof(tuple->src.u3));
47 memset(&tuple->dst.u3, 0, sizeof(tuple->dst.u3)); 47 memset(&tuple->dst.u3, 0, sizeof(tuple->dst.u3));
48 48
49 return 1; 49 return true;
50} 50}
51 51
52static int generic_print_tuple(struct seq_file *s, 52static int generic_print_tuple(struct seq_file *s,
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c
index 9810d81e2a06..08404e6755fb 100644
--- a/net/netfilter/nf_conntrack_netbios_ns.c
+++ b/net/netfilter/nf_conntrack_netbios_ns.c
@@ -47,7 +47,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
47{ 47{
48 struct nf_conntrack_expect *exp; 48 struct nf_conntrack_expect *exp;
49 struct iphdr *iph = ip_hdr(skb); 49 struct iphdr *iph = ip_hdr(skb);
50 struct rtable *rt = (struct rtable *)skb->dst; 50 struct rtable *rt = skb->rtable;
51 struct in_device *in_dev; 51 struct in_device *in_dev;
52 __be32 mask = 0; 52 __be32 mask = 0;
53 53
@@ -86,6 +86,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
86 86
87 exp->expectfn = NULL; 87 exp->expectfn = NULL;
88 exp->flags = NF_CT_EXPECT_PERMANENT; 88 exp->flags = NF_CT_EXPECT_PERMANENT;
89 exp->class = NF_CT_EXPECT_CLASS_DEFAULT;
89 exp->helper = NULL; 90 exp->helper = NULL;
90 91
91 nf_ct_expect_related(exp); 92 nf_ct_expect_related(exp);
@@ -96,19 +97,23 @@ out:
96 return NF_ACCEPT; 97 return NF_ACCEPT;
97} 98}
98 99
100static struct nf_conntrack_expect_policy exp_policy = {
101 .max_expected = 1,
102};
103
99static struct nf_conntrack_helper helper __read_mostly = { 104static struct nf_conntrack_helper helper __read_mostly = {
100 .name = "netbios-ns", 105 .name = "netbios-ns",
101 .tuple.src.l3num = AF_INET, 106 .tuple.src.l3num = AF_INET,
102 .tuple.src.u.udp.port = __constant_htons(NMBD_PORT), 107 .tuple.src.u.udp.port = __constant_htons(NMBD_PORT),
103 .tuple.dst.protonum = IPPROTO_UDP, 108 .tuple.dst.protonum = IPPROTO_UDP,
104 .max_expected = 1,
105 .me = THIS_MODULE, 109 .me = THIS_MODULE,
106 .help = help, 110 .help = help,
111 .expect_policy = &exp_policy,
107}; 112};
108 113
109static int __init nf_conntrack_netbios_ns_init(void) 114static int __init nf_conntrack_netbios_ns_init(void)
110{ 115{
111 helper.timeout = timeout; 116 exp_policy.timeout = timeout;
112 return nf_conntrack_helper_register(&helper); 117 return nf_conntrack_helper_register(&helper);
113} 118}
114 119
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 4a1b42b2b7a5..16774ecd1c4e 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -145,10 +145,11 @@ nla_put_failure:
145static inline int 145static inline int
146ctnetlink_dump_protoinfo(struct sk_buff *skb, const struct nf_conn *ct) 146ctnetlink_dump_protoinfo(struct sk_buff *skb, const struct nf_conn *ct)
147{ 147{
148 struct nf_conntrack_l4proto *l4proto = nf_ct_l4proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num, ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum); 148 struct nf_conntrack_l4proto *l4proto;
149 struct nlattr *nest_proto; 149 struct nlattr *nest_proto;
150 int ret; 150 int ret;
151 151
152 l4proto = nf_ct_l4proto_find_get(nf_ct_l3num(ct), nf_ct_protonum(ct));
152 if (!l4proto->to_nlattr) { 153 if (!l4proto->to_nlattr) {
153 nf_ct_l4proto_put(l4proto); 154 nf_ct_l4proto_put(l4proto);
154 return 0; 155 return 0;
@@ -368,8 +369,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
368 nfmsg = NLMSG_DATA(nlh); 369 nfmsg = NLMSG_DATA(nlh);
369 370
370 nlh->nlmsg_flags = (nowait && pid) ? NLM_F_MULTI : 0; 371 nlh->nlmsg_flags = (nowait && pid) ? NLM_F_MULTI : 0;
371 nfmsg->nfgen_family = 372 nfmsg->nfgen_family = nf_ct_l3num(ct);
372 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
373 nfmsg->version = NFNETLINK_V0; 373 nfmsg->version = NFNETLINK_V0;
374 nfmsg->res_id = 0; 374 nfmsg->res_id = 0;
375 375
@@ -454,7 +454,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
454 nfmsg = NLMSG_DATA(nlh); 454 nfmsg = NLMSG_DATA(nlh);
455 455
456 nlh->nlmsg_flags = flags; 456 nlh->nlmsg_flags = flags;
457 nfmsg->nfgen_family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; 457 nfmsg->nfgen_family = nf_ct_l3num(ct);
458 nfmsg->version = NFNETLINK_V0; 458 nfmsg->version = NFNETLINK_V0;
459 nfmsg->res_id = 0; 459 nfmsg->res_id = 0;
460 460
@@ -535,8 +535,6 @@ static int ctnetlink_done(struct netlink_callback *cb)
535 return 0; 535 return 0;
536} 536}
537 537
538#define L3PROTO(ct) (ct)->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num
539
540static int 538static int
541ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 539ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
542{ 540{
@@ -558,7 +556,7 @@ restart:
558 /* Dump entries of a given L3 protocol number. 556 /* Dump entries of a given L3 protocol number.
559 * If it is not specified, ie. l3proto == 0, 557 * If it is not specified, ie. l3proto == 0,
560 * then dump everything. */ 558 * then dump everything. */
561 if (l3proto && L3PROTO(ct) != l3proto) 559 if (l3proto && nf_ct_l3num(ct) != l3proto)
562 continue; 560 continue;
563 if (cb->args[1]) { 561 if (cb->args[1]) {
564 if (ct != last) 562 if (ct != last)
@@ -704,20 +702,11 @@ static int nfnetlink_parse_nat_proto(struct nlattr *attr,
704 if (err < 0) 702 if (err < 0)
705 return err; 703 return err;
706 704
707 npt = nf_nat_proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum); 705 npt = nf_nat_proto_find_get(nf_ct_protonum(ct));
708 706 if (npt->nlattr_to_range)
709 if (!npt->nlattr_to_range) { 707 err = npt->nlattr_to_range(tb, range);
710 nf_nat_proto_put(npt);
711 return 0;
712 }
713
714 /* nlattr_to_range returns 1 if it parsed, 0 if not, neg. on error */
715 if (npt->nlattr_to_range(tb, range) > 0)
716 range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
717
718 nf_nat_proto_put(npt); 708 nf_nat_proto_put(npt);
719 709 return err;
720 return 0;
721} 710}
722 711
723static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = { 712static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
@@ -1010,14 +999,11 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, struct nlattr *cda[])
1010{ 999{
1011 struct nlattr *tb[CTA_PROTOINFO_MAX+1], *attr = cda[CTA_PROTOINFO]; 1000 struct nlattr *tb[CTA_PROTOINFO_MAX+1], *attr = cda[CTA_PROTOINFO];
1012 struct nf_conntrack_l4proto *l4proto; 1001 struct nf_conntrack_l4proto *l4proto;
1013 u_int16_t npt = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
1014 u_int16_t l3num = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
1015 int err = 0; 1002 int err = 0;
1016 1003
1017 nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, NULL); 1004 nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, NULL);
1018 1005
1019 l4proto = nf_ct_l4proto_find_get(l3num, npt); 1006 l4proto = nf_ct_l4proto_find_get(nf_ct_l3num(ct), nf_ct_protonum(ct));
1020
1021 if (l4proto->from_nlattr) 1007 if (l4proto->from_nlattr)
1022 err = l4proto->from_nlattr(tb, ct); 1008 err = l4proto->from_nlattr(tb, ct);
1023 nf_ct_l4proto_put(l4proto); 1009 nf_ct_l4proto_put(l4proto);
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index b5cb8e831230..97e54b0e43a3 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -119,7 +119,7 @@ static void pptp_expectfn(struct nf_conn *ct,
119 /* obviously this tuple inversion only works until you do NAT */ 119 /* obviously this tuple inversion only works until you do NAT */
120 nf_ct_invert_tuplepr(&inv_t, &exp->tuple); 120 nf_ct_invert_tuplepr(&inv_t, &exp->tuple);
121 pr_debug("trying to unexpect other dir: "); 121 pr_debug("trying to unexpect other dir: ");
122 NF_CT_DUMP_TUPLE(&inv_t); 122 nf_ct_dump_tuple(&inv_t);
123 123
124 exp_other = nf_ct_expect_find_get(&inv_t); 124 exp_other = nf_ct_expect_find_get(&inv_t);
125 if (exp_other) { 125 if (exp_other) {
@@ -141,7 +141,7 @@ static int destroy_sibling_or_exp(const struct nf_conntrack_tuple *t)
141 struct nf_conn *sibling; 141 struct nf_conn *sibling;
142 142
143 pr_debug("trying to timeout ct or exp for tuple "); 143 pr_debug("trying to timeout ct or exp for tuple ");
144 NF_CT_DUMP_TUPLE(t); 144 nf_ct_dump_tuple(t);
145 145
146 h = nf_conntrack_find_get(t); 146 h = nf_conntrack_find_get(t);
147 if (h) { 147 if (h) {
@@ -208,7 +208,8 @@ static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid)
208 208
209 /* original direction, PNS->PAC */ 209 /* original direction, PNS->PAC */
210 dir = IP_CT_DIR_ORIGINAL; 210 dir = IP_CT_DIR_ORIGINAL;
211 nf_ct_expect_init(exp_orig, ct->tuplehash[dir].tuple.src.l3num, 211 nf_ct_expect_init(exp_orig, NF_CT_EXPECT_CLASS_DEFAULT,
212 nf_ct_l3num(ct),
212 &ct->tuplehash[dir].tuple.src.u3, 213 &ct->tuplehash[dir].tuple.src.u3,
213 &ct->tuplehash[dir].tuple.dst.u3, 214 &ct->tuplehash[dir].tuple.dst.u3,
214 IPPROTO_GRE, &peer_callid, &callid); 215 IPPROTO_GRE, &peer_callid, &callid);
@@ -216,7 +217,8 @@ static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid)
216 217
217 /* reply direction, PAC->PNS */ 218 /* reply direction, PAC->PNS */
218 dir = IP_CT_DIR_REPLY; 219 dir = IP_CT_DIR_REPLY;
219 nf_ct_expect_init(exp_reply, ct->tuplehash[dir].tuple.src.l3num, 220 nf_ct_expect_init(exp_reply, NF_CT_EXPECT_CLASS_DEFAULT,
221 nf_ct_l3num(ct),
220 &ct->tuplehash[dir].tuple.src.u3, 222 &ct->tuplehash[dir].tuple.src.u3,
221 &ct->tuplehash[dir].tuple.dst.u3, 223 &ct->tuplehash[dir].tuple.dst.u3,
222 IPPROTO_GRE, &callid, &peer_callid); 224 IPPROTO_GRE, &callid, &peer_callid);
@@ -575,17 +577,21 @@ conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff,
575 return ret; 577 return ret;
576} 578}
577 579
580static const struct nf_conntrack_expect_policy pptp_exp_policy = {
581 .max_expected = 2,
582 .timeout = 5 * 60,
583};
584
578/* control protocol helper */ 585/* control protocol helper */
579static struct nf_conntrack_helper pptp __read_mostly = { 586static struct nf_conntrack_helper pptp __read_mostly = {
580 .name = "pptp", 587 .name = "pptp",
581 .me = THIS_MODULE, 588 .me = THIS_MODULE,
582 .max_expected = 2,
583 .timeout = 5 * 60,
584 .tuple.src.l3num = AF_INET, 589 .tuple.src.l3num = AF_INET,
585 .tuple.src.u.tcp.port = __constant_htons(PPTP_CONTROL_PORT), 590 .tuple.src.u.tcp.port = __constant_htons(PPTP_CONTROL_PORT),
586 .tuple.dst.protonum = IPPROTO_TCP, 591 .tuple.dst.protonum = IPPROTO_TCP,
587 .help = conntrack_pptp_help, 592 .help = conntrack_pptp_help,
588 .destroy = pptp_destroy_siblings, 593 .destroy = pptp_destroy_siblings,
594 .expect_policy = &pptp_exp_policy,
589}; 595};
590 596
591static int __init nf_conntrack_pptp_init(void) 597static int __init nf_conntrack_pptp_init(void)
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 8595b5946acf..a49fc932629b 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -146,18 +146,15 @@ EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put);
146 146
147static int kill_l3proto(struct nf_conn *i, void *data) 147static int kill_l3proto(struct nf_conn *i, void *data)
148{ 148{
149 return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num == 149 return nf_ct_l3num(i) == ((struct nf_conntrack_l3proto *)data)->l3proto;
150 ((struct nf_conntrack_l3proto *)data)->l3proto);
151} 150}
152 151
153static int kill_l4proto(struct nf_conn *i, void *data) 152static int kill_l4proto(struct nf_conn *i, void *data)
154{ 153{
155 struct nf_conntrack_l4proto *l4proto; 154 struct nf_conntrack_l4proto *l4proto;
156 l4proto = (struct nf_conntrack_l4proto *)data; 155 l4proto = (struct nf_conntrack_l4proto *)data;
157 return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum == 156 return nf_ct_protonum(i) == l4proto->l4proto &&
158 l4proto->l4proto) && 157 nf_ct_l3num(i) == l4proto->l3proto;
159 (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num ==
160 l4proto->l3proto);
161} 158}
162 159
163static int nf_ct_l3proto_register_sysctl(struct nf_conntrack_l3proto *l3proto) 160static int nf_ct_l3proto_register_sysctl(struct nf_conntrack_l3proto *l3proto)
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
new file mode 100644
index 000000000000..afb4a1861d2c
--- /dev/null
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -0,0 +1,815 @@
1/*
2 * DCCP connection tracking protocol helper
3 *
4 * Copyright (c) 2005, 2006, 2008 Patrick McHardy <kaber@trash.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/sysctl.h>
15#include <linux/spinlock.h>
16#include <linux/skbuff.h>
17#include <linux/dccp.h>
18
19#include <linux/netfilter/nfnetlink_conntrack.h>
20#include <net/netfilter/nf_conntrack.h>
21#include <net/netfilter/nf_conntrack_l4proto.h>
22#include <net/netfilter/nf_log.h>
23
24static DEFINE_RWLOCK(dccp_lock);
25
26static int nf_ct_dccp_loose __read_mostly = 1;
27
28/* Timeouts are based on values from RFC4340:
29 *
30 * - REQUEST:
31 *
32 * 8.1.2. Client Request
33 *
34 * A client MAY give up on its DCCP-Requests after some time
35 * (3 minutes, for example).
36 *
37 * - RESPOND:
38 *
39 * 8.1.3. Server Response
40 *
41 * It MAY also leave the RESPOND state for CLOSED after a timeout of
42 * not less than 4MSL (8 minutes);
43 *
44 * - PARTOPEN:
45 *
46 * 8.1.5. Handshake Completion
47 *
48 * If the client remains in PARTOPEN for more than 4MSL (8 minutes),
49 * it SHOULD reset the connection with Reset Code 2, "Aborted".
50 *
51 * - OPEN:
52 *
53 * The DCCP timestamp overflows after 11.9 hours. If the connection
54 * stays idle this long the sequence number won't be recognized
55 * as valid anymore.
56 *
57 * - CLOSEREQ/CLOSING:
58 *
59 * 8.3. Termination
60 *
61 * The retransmission timer should initially be set to go off in two
62 * round-trip times and should back off to not less than once every
63 * 64 seconds ...
64 *
65 * - TIMEWAIT:
66 *
67 * 4.3. States
68 *
69 * A server or client socket remains in this state for 2MSL (4 minutes)
70 * after the connection has been town down, ...
71 */
72
73#define DCCP_MSL (2 * 60 * HZ)
74
75static unsigned int dccp_timeout[CT_DCCP_MAX + 1] __read_mostly = {
76 [CT_DCCP_REQUEST] = 2 * DCCP_MSL,
77 [CT_DCCP_RESPOND] = 4 * DCCP_MSL,
78 [CT_DCCP_PARTOPEN] = 4 * DCCP_MSL,
79 [CT_DCCP_OPEN] = 12 * 3600 * HZ,
80 [CT_DCCP_CLOSEREQ] = 64 * HZ,
81 [CT_DCCP_CLOSING] = 64 * HZ,
82 [CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL,
83};
84
85static const char * const dccp_state_names[] = {
86 [CT_DCCP_NONE] = "NONE",
87 [CT_DCCP_REQUEST] = "REQUEST",
88 [CT_DCCP_RESPOND] = "RESPOND",
89 [CT_DCCP_PARTOPEN] = "PARTOPEN",
90 [CT_DCCP_OPEN] = "OPEN",
91 [CT_DCCP_CLOSEREQ] = "CLOSEREQ",
92 [CT_DCCP_CLOSING] = "CLOSING",
93 [CT_DCCP_TIMEWAIT] = "TIMEWAIT",
94 [CT_DCCP_IGNORE] = "IGNORE",
95 [CT_DCCP_INVALID] = "INVALID",
96};
97
98#define sNO CT_DCCP_NONE
99#define sRQ CT_DCCP_REQUEST
100#define sRS CT_DCCP_RESPOND
101#define sPO CT_DCCP_PARTOPEN
102#define sOP CT_DCCP_OPEN
103#define sCR CT_DCCP_CLOSEREQ
104#define sCG CT_DCCP_CLOSING
105#define sTW CT_DCCP_TIMEWAIT
106#define sIG CT_DCCP_IGNORE
107#define sIV CT_DCCP_INVALID
108
109/*
110 * DCCP state transition table
111 *
112 * The assumption is the same as for TCP tracking:
113 *
114 * We are the man in the middle. All the packets go through us but might
115 * get lost in transit to the destination. It is assumed that the destination
116 * can't receive segments we haven't seen.
117 *
118 * The following states exist:
119 *
120 * NONE: Initial state, expecting Request
121 * REQUEST: Request seen, waiting for Response from server
122 * RESPOND: Response from server seen, waiting for Ack from client
123 * PARTOPEN: Ack after Response seen, waiting for packet other than Response,
124 * Reset or Sync from server
125 * OPEN: Packet other than Response, Reset or Sync seen
126 * CLOSEREQ: CloseReq from server seen, expecting Close from client
127 * CLOSING: Close seen, expecting Reset
128 * TIMEWAIT: Reset seen
129 * IGNORE: Not determinable whether packet is valid
130 *
131 * Some states exist only on one side of the connection: REQUEST, RESPOND,
132 * PARTOPEN, CLOSEREQ. For the other side these states are equivalent to
133 * the one it was in before.
134 *
135 * Packets are marked as ignored (sIG) if we don't know if they're valid
136 * (for example a reincarnation of a connection we didn't notice is dead
137 * already) and the server may send back a connection closing Reset or a
138 * Response. They're also used for Sync/SyncAck packets, which we don't
139 * care about.
140 */
141static const u_int8_t
142dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] = {
143 [CT_DCCP_ROLE_CLIENT] = {
144 [DCCP_PKT_REQUEST] = {
145 /*
146 * sNO -> sRQ Regular Request
147 * sRQ -> sRQ Retransmitted Request or reincarnation
148 * sRS -> sRS Retransmitted Request (apparently Response
149 * got lost after we saw it) or reincarnation
150 * sPO -> sIG Ignore, conntrack might be out of sync
151 * sOP -> sIG Ignore, conntrack might be out of sync
152 * sCR -> sIG Ignore, conntrack might be out of sync
153 * sCG -> sIG Ignore, conntrack might be out of sync
154 * sTW -> sRQ Reincarnation
155 *
156 * sNO, sRQ, sRS, sPO. sOP, sCR, sCG, sTW, */
157 sRQ, sRQ, sRS, sIG, sIG, sIG, sIG, sRQ,
158 },
159 [DCCP_PKT_RESPONSE] = {
160 /*
161 * sNO -> sIV Invalid
162 * sRQ -> sIG Ignore, might be response to ignored Request
163 * sRS -> sIG Ignore, might be response to ignored Request
164 * sPO -> sIG Ignore, might be response to ignored Request
165 * sOP -> sIG Ignore, might be response to ignored Request
166 * sCR -> sIG Ignore, might be response to ignored Request
167 * sCG -> sIG Ignore, might be response to ignored Request
168 * sTW -> sIV Invalid, reincarnation in reverse direction
169 * goes through sRQ
170 *
171 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
172 sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIV,
173 },
174 [DCCP_PKT_ACK] = {
175 /*
176 * sNO -> sIV No connection
177 * sRQ -> sIV No connection
178 * sRS -> sPO Ack for Response, move to PARTOPEN (8.1.5.)
179 * sPO -> sPO Retransmitted Ack for Response, remain in PARTOPEN
180 * sOP -> sOP Regular ACK, remain in OPEN
181 * sCR -> sCR Ack in CLOSEREQ MAY be processed (8.3.)
182 * sCG -> sCG Ack in CLOSING MAY be processed (8.3.)
183 * sTW -> sIV
184 *
185 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
186 sIV, sIV, sPO, sPO, sOP, sCR, sCG, sIV
187 },
188 [DCCP_PKT_DATA] = {
189 /*
190 * sNO -> sIV No connection
191 * sRQ -> sIV No connection
192 * sRS -> sIV No connection
193 * sPO -> sIV MUST use DataAck in PARTOPEN state (8.1.5.)
194 * sOP -> sOP Regular Data packet
195 * sCR -> sCR Data in CLOSEREQ MAY be processed (8.3.)
196 * sCG -> sCG Data in CLOSING MAY be processed (8.3.)
197 * sTW -> sIV
198 *
199 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
200 sIV, sIV, sIV, sIV, sOP, sCR, sCG, sIV,
201 },
202 [DCCP_PKT_DATAACK] = {
203 /*
204 * sNO -> sIV No connection
205 * sRQ -> sIV No connection
206 * sRS -> sPO Ack for Response, move to PARTOPEN (8.1.5.)
207 * sPO -> sPO Remain in PARTOPEN state
208 * sOP -> sOP Regular DataAck packet in OPEN state
209 * sCR -> sCR DataAck in CLOSEREQ MAY be processed (8.3.)
210 * sCG -> sCG DataAck in CLOSING MAY be processed (8.3.)
211 * sTW -> sIV
212 *
213 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
214 sIV, sIV, sPO, sPO, sOP, sCR, sCG, sIV
215 },
216 [DCCP_PKT_CLOSEREQ] = {
217 /*
218 * CLOSEREQ may only be sent by the server.
219 *
220 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
221 sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV
222 },
223 [DCCP_PKT_CLOSE] = {
224 /*
225 * sNO -> sIV No connection
226 * sRQ -> sIV No connection
227 * sRS -> sIV No connection
228 * sPO -> sCG Client-initiated close
229 * sOP -> sCG Client-initiated close
230 * sCR -> sCG Close in response to CloseReq (8.3.)
231 * sCG -> sCG Retransmit
232 * sTW -> sIV Late retransmit, already in TIME_WAIT
233 *
234 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
235 sIV, sIV, sIV, sCG, sCG, sCG, sIV, sIV
236 },
237 [DCCP_PKT_RESET] = {
238 /*
239 * sNO -> sIV No connection
240 * sRQ -> sTW Sync received or timeout, SHOULD send Reset (8.1.1.)
241 * sRS -> sTW Response received without Request
242 * sPO -> sTW Timeout, SHOULD send Reset (8.1.5.)
243 * sOP -> sTW Connection reset
244 * sCR -> sTW Connection reset
245 * sCG -> sTW Connection reset
246 * sTW -> sIG Ignore (don't refresh timer)
247 *
248 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
249 sIV, sTW, sTW, sTW, sTW, sTW, sTW, sIG
250 },
251 [DCCP_PKT_SYNC] = {
252 /*
253 * We currently ignore Sync packets
254 *
255 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
256 sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
257 },
258 [DCCP_PKT_SYNCACK] = {
259 /*
260 * We currently ignore SyncAck packets
261 *
262 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
263 sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
264 },
265 },
266 [CT_DCCP_ROLE_SERVER] = {
267 [DCCP_PKT_REQUEST] = {
268 /*
269 * sNO -> sIV Invalid
270 * sRQ -> sIG Ignore, conntrack might be out of sync
271 * sRS -> sIG Ignore, conntrack might be out of sync
272 * sPO -> sIG Ignore, conntrack might be out of sync
273 * sOP -> sIG Ignore, conntrack might be out of sync
274 * sCR -> sIG Ignore, conntrack might be out of sync
275 * sCG -> sIG Ignore, conntrack might be out of sync
276 * sTW -> sRQ Reincarnation, must reverse roles
277 *
278 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
279 sIV, sIG, sIG, sIG, sIG, sIG, sIG, sRQ
280 },
281 [DCCP_PKT_RESPONSE] = {
282 /*
283 * sNO -> sIV Response without Request
284 * sRQ -> sRS Response to clients Request
285 * sRS -> sRS Retransmitted Response (8.1.3. SHOULD NOT)
286 * sPO -> sIG Response to an ignored Request or late retransmit
287 * sOP -> sIG Ignore, might be response to ignored Request
288 * sCR -> sIG Ignore, might be response to ignored Request
289 * sCG -> sIG Ignore, might be response to ignored Request
290 * sTW -> sIV Invalid, Request from client in sTW moves to sRQ
291 *
292 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
293 sIV, sRS, sRS, sIG, sIG, sIG, sIG, sIV
294 },
295 [DCCP_PKT_ACK] = {
296 /*
297 * sNO -> sIV No connection
298 * sRQ -> sIV No connection
299 * sRS -> sIV No connection
300 * sPO -> sOP Enter OPEN state (8.1.5.)
301 * sOP -> sOP Regular Ack in OPEN state
302 * sCR -> sIV Waiting for Close from client
303 * sCG -> sCG Ack in CLOSING MAY be processed (8.3.)
304 * sTW -> sIV
305 *
306 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
307 sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV
308 },
309 [DCCP_PKT_DATA] = {
310 /*
311 * sNO -> sIV No connection
312 * sRQ -> sIV No connection
313 * sRS -> sIV No connection
314 * sPO -> sOP Enter OPEN state (8.1.5.)
315 * sOP -> sOP Regular Data packet in OPEN state
316 * sCR -> sIV Waiting for Close from client
317 * sCG -> sCG Data in CLOSING MAY be processed (8.3.)
318 * sTW -> sIV
319 *
320 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
321 sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV
322 },
323 [DCCP_PKT_DATAACK] = {
324 /*
325 * sNO -> sIV No connection
326 * sRQ -> sIV No connection
327 * sRS -> sIV No connection
328 * sPO -> sOP Enter OPEN state (8.1.5.)
329 * sOP -> sOP Regular DataAck in OPEN state
330 * sCR -> sIV Waiting for Close from client
331 * sCG -> sCG Data in CLOSING MAY be processed (8.3.)
332 * sTW -> sIV
333 *
334 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
335 sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV
336 },
337 [DCCP_PKT_CLOSEREQ] = {
338 /*
339 * sNO -> sIV No connection
340 * sRQ -> sIV No connection
341 * sRS -> sIV No connection
342 * sPO -> sOP -> sCR Move directly to CLOSEREQ (8.1.5.)
343 * sOP -> sCR CloseReq in OPEN state
344 * sCR -> sCR Retransmit
345 * sCG -> sCR Simultaneous close, client sends another Close
346 * sTW -> sIV Already closed
347 *
348 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
349 sIV, sIV, sIV, sCR, sCR, sCR, sCR, sIV
350 },
351 [DCCP_PKT_CLOSE] = {
352 /*
353 * sNO -> sIV No connection
354 * sRQ -> sIV No connection
355 * sRS -> sIV No connection
356 * sPO -> sOP -> sCG Move direcly to CLOSING
357 * sOP -> sCG Move to CLOSING
358 * sCR -> sIV Close after CloseReq is invalid
359 * sCG -> sCG Retransmit
360 * sTW -> sIV Already closed
361 *
362 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
363 sIV, sIV, sIV, sCG, sCG, sIV, sCG, sIV
364 },
365 [DCCP_PKT_RESET] = {
366 /*
367 * sNO -> sIV No connection
368 * sRQ -> sTW Reset in response to Request
369 * sRS -> sTW Timeout, SHOULD send Reset (8.1.3.)
370 * sPO -> sTW Timeout, SHOULD send Reset (8.1.3.)
371 * sOP -> sTW
372 * sCR -> sTW
373 * sCG -> sTW
374 * sTW -> sIG Ignore (don't refresh timer)
375 *
376 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW, sTW */
377 sIV, sTW, sTW, sTW, sTW, sTW, sTW, sTW, sIG
378 },
379 [DCCP_PKT_SYNC] = {
380 /*
381 * We currently ignore Sync packets
382 *
383 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
384 sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
385 },
386 [DCCP_PKT_SYNCACK] = {
387 /*
388 * We currently ignore SyncAck packets
389 *
390 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
391 sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
392 },
393 },
394};
395
396static bool dccp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
397 struct nf_conntrack_tuple *tuple)
398{
399 struct dccp_hdr _hdr, *dh;
400
401 dh = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
402 if (dh == NULL)
403 return false;
404
405 tuple->src.u.dccp.port = dh->dccph_sport;
406 tuple->dst.u.dccp.port = dh->dccph_dport;
407 return true;
408}
409
410static bool dccp_invert_tuple(struct nf_conntrack_tuple *inv,
411 const struct nf_conntrack_tuple *tuple)
412{
413 inv->src.u.dccp.port = tuple->dst.u.dccp.port;
414 inv->dst.u.dccp.port = tuple->src.u.dccp.port;
415 return true;
416}
417
418static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
419 unsigned int dataoff)
420{
421 struct dccp_hdr _dh, *dh;
422 const char *msg;
423 u_int8_t state;
424
425 dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
426 BUG_ON(dh == NULL);
427
428 state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
429 switch (state) {
430 default:
431 if (nf_ct_dccp_loose == 0) {
432 msg = "nf_ct_dccp: not picking up existing connection ";
433 goto out_invalid;
434 }
435 case CT_DCCP_REQUEST:
436 break;
437 case CT_DCCP_INVALID:
438 msg = "nf_ct_dccp: invalid state transition ";
439 goto out_invalid;
440 }
441
442 ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
443 ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
444 ct->proto.dccp.state = CT_DCCP_NONE;
445 return true;
446
447out_invalid:
448 if (LOG_INVALID(IPPROTO_DCCP))
449 nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, msg);
450 return false;
451}
452
453static u64 dccp_ack_seq(const struct dccp_hdr *dh)
454{
455 const struct dccp_hdr_ack_bits *dhack;
456
457 dhack = (void *)dh + __dccp_basic_hdr_len(dh);
458 return ((u64)ntohs(dhack->dccph_ack_nr_high) << 32) +
459 ntohl(dhack->dccph_ack_nr_low);
460}
461
462static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
463 unsigned int dataoff, enum ip_conntrack_info ctinfo,
464 int pf, unsigned int hooknum)
465{
466 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
467 struct dccp_hdr _dh, *dh;
468 u_int8_t type, old_state, new_state;
469 enum ct_dccp_roles role;
470
471 dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
472 BUG_ON(dh == NULL);
473 type = dh->dccph_type;
474
475 if (type == DCCP_PKT_RESET &&
476 !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
477 /* Tear down connection immediately if only reply is a RESET */
478 if (del_timer(&ct->timeout))
479 ct->timeout.function((unsigned long)ct);
480 return NF_ACCEPT;
481 }
482
483 write_lock_bh(&dccp_lock);
484
485 role = ct->proto.dccp.role[dir];
486 old_state = ct->proto.dccp.state;
487 new_state = dccp_state_table[role][type][old_state];
488
489 switch (new_state) {
490 case CT_DCCP_REQUEST:
491 if (old_state == CT_DCCP_TIMEWAIT &&
492 role == CT_DCCP_ROLE_SERVER) {
493 /* Reincarnation in the reverse direction: reopen and
494 * reverse client/server roles. */
495 ct->proto.dccp.role[dir] = CT_DCCP_ROLE_CLIENT;
496 ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_SERVER;
497 }
498 break;
499 case CT_DCCP_RESPOND:
500 if (old_state == CT_DCCP_REQUEST)
501 ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh);
502 break;
503 case CT_DCCP_PARTOPEN:
504 if (old_state == CT_DCCP_RESPOND &&
505 type == DCCP_PKT_ACK &&
506 dccp_ack_seq(dh) == ct->proto.dccp.handshake_seq)
507 set_bit(IPS_ASSURED_BIT, &ct->status);
508 break;
509 case CT_DCCP_IGNORE:
510 /*
511 * Connection tracking might be out of sync, so we ignore
512 * packets that might establish a new connection and resync
513 * if the server responds with a valid Response.
514 */
515 if (ct->proto.dccp.last_dir == !dir &&
516 ct->proto.dccp.last_pkt == DCCP_PKT_REQUEST &&
517 type == DCCP_PKT_RESPONSE) {
518 ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_CLIENT;
519 ct->proto.dccp.role[dir] = CT_DCCP_ROLE_SERVER;
520 ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh);
521 new_state = CT_DCCP_RESPOND;
522 break;
523 }
524 ct->proto.dccp.last_dir = dir;
525 ct->proto.dccp.last_pkt = type;
526
527 write_unlock_bh(&dccp_lock);
528 if (LOG_INVALID(IPPROTO_DCCP))
529 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
530 "nf_ct_dccp: invalid packet ignored ");
531 return NF_ACCEPT;
532 case CT_DCCP_INVALID:
533 write_unlock_bh(&dccp_lock);
534 if (LOG_INVALID(IPPROTO_DCCP))
535 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
536 "nf_ct_dccp: invalid state transition ");
537 return -NF_ACCEPT;
538 }
539
540 ct->proto.dccp.last_dir = dir;
541 ct->proto.dccp.last_pkt = type;
542 ct->proto.dccp.state = new_state;
543 write_unlock_bh(&dccp_lock);
544 nf_ct_refresh_acct(ct, ctinfo, skb, dccp_timeout[new_state]);
545
546 return NF_ACCEPT;
547}
548
549static int dccp_error(struct sk_buff *skb, unsigned int dataoff,
550 enum ip_conntrack_info *ctinfo, int pf,
551 unsigned int hooknum)
552{
553 struct dccp_hdr _dh, *dh;
554 unsigned int dccp_len = skb->len - dataoff;
555 unsigned int cscov;
556 const char *msg;
557
558 dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
559 if (dh == NULL) {
560 msg = "nf_ct_dccp: short packet ";
561 goto out_invalid;
562 }
563
564 if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) ||
565 dh->dccph_doff * 4 > dccp_len) {
566 msg = "nf_ct_dccp: truncated/malformed packet ";
567 goto out_invalid;
568 }
569
570 cscov = dccp_len;
571 if (dh->dccph_cscov) {
572 cscov = (dh->dccph_cscov - 1) * 4;
573 if (cscov > dccp_len) {
574 msg = "nf_ct_dccp: bad checksum coverage ";
575 goto out_invalid;
576 }
577 }
578
579 if (nf_conntrack_checksum && hooknum == NF_INET_PRE_ROUTING &&
580 nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_DCCP,
581 pf)) {
582 msg = "nf_ct_dccp: bad checksum ";
583 goto out_invalid;
584 }
585
586 if (dh->dccph_type >= DCCP_PKT_INVALID) {
587 msg = "nf_ct_dccp: reserved packet type ";
588 goto out_invalid;
589 }
590
591 return NF_ACCEPT;
592
593out_invalid:
594 if (LOG_INVALID(IPPROTO_DCCP))
595 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, msg);
596 return -NF_ACCEPT;
597}
598
599static int dccp_print_tuple(struct seq_file *s,
600 const struct nf_conntrack_tuple *tuple)
601{
602 return seq_printf(s, "sport=%hu dport=%hu ",
603 ntohs(tuple->src.u.dccp.port),
604 ntohs(tuple->dst.u.dccp.port));
605}
606
607static int dccp_print_conntrack(struct seq_file *s, const struct nf_conn *ct)
608{
609 return seq_printf(s, "%s ", dccp_state_names[ct->proto.dccp.state]);
610}
611
612#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
613static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
614 const struct nf_conn *ct)
615{
616 struct nlattr *nest_parms;
617
618 read_lock_bh(&dccp_lock);
619 nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP | NLA_F_NESTED);
620 if (!nest_parms)
621 goto nla_put_failure;
622 NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state);
623 nla_nest_end(skb, nest_parms);
624 read_unlock_bh(&dccp_lock);
625 return 0;
626
627nla_put_failure:
628 read_unlock_bh(&dccp_lock);
629 return -1;
630}
631
632static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = {
633 [CTA_PROTOINFO_DCCP_STATE] = { .type = NLA_U8 },
634};
635
636static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
637{
638 struct nlattr *attr = cda[CTA_PROTOINFO_DCCP];
639 struct nlattr *tb[CTA_PROTOINFO_DCCP_MAX + 1];
640 int err;
641
642 if (!attr)
643 return 0;
644
645 err = nla_parse_nested(tb, CTA_PROTOINFO_DCCP_MAX, attr,
646 dccp_nla_policy);
647 if (err < 0)
648 return err;
649
650 if (!tb[CTA_PROTOINFO_DCCP_STATE] ||
651 nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]) >= CT_DCCP_IGNORE)
652 return -EINVAL;
653
654 write_lock_bh(&dccp_lock);
655 ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]);
656 write_unlock_bh(&dccp_lock);
657 return 0;
658}
659#endif
660
661#ifdef CONFIG_SYSCTL
662static unsigned int dccp_sysctl_table_users;
663static struct ctl_table_header *dccp_sysctl_header;
664static ctl_table dccp_sysctl_table[] = {
665 {
666 .ctl_name = CTL_UNNUMBERED,
667 .procname = "nf_conntrack_dccp_timeout_request",
668 .data = &dccp_timeout[CT_DCCP_REQUEST],
669 .maxlen = sizeof(unsigned int),
670 .mode = 0644,
671 .proc_handler = proc_dointvec_jiffies,
672 },
673 {
674 .ctl_name = CTL_UNNUMBERED,
675 .procname = "nf_conntrack_dccp_timeout_respond",
676 .data = &dccp_timeout[CT_DCCP_RESPOND],
677 .maxlen = sizeof(unsigned int),
678 .mode = 0644,
679 .proc_handler = proc_dointvec_jiffies,
680 },
681 {
682 .ctl_name = CTL_UNNUMBERED,
683 .procname = "nf_conntrack_dccp_timeout_partopen",
684 .data = &dccp_timeout[CT_DCCP_PARTOPEN],
685 .maxlen = sizeof(unsigned int),
686 .mode = 0644,
687 .proc_handler = proc_dointvec_jiffies,
688 },
689 {
690 .ctl_name = CTL_UNNUMBERED,
691 .procname = "nf_conntrack_dccp_timeout_open",
692 .data = &dccp_timeout[CT_DCCP_OPEN],
693 .maxlen = sizeof(unsigned int),
694 .mode = 0644,
695 .proc_handler = proc_dointvec_jiffies,
696 },
697 {
698 .ctl_name = CTL_UNNUMBERED,
699 .procname = "nf_conntrack_dccp_timeout_closereq",
700 .data = &dccp_timeout[CT_DCCP_CLOSEREQ],
701 .maxlen = sizeof(unsigned int),
702 .mode = 0644,
703 .proc_handler = proc_dointvec_jiffies,
704 },
705 {
706 .ctl_name = CTL_UNNUMBERED,
707 .procname = "nf_conntrack_dccp_timeout_closing",
708 .data = &dccp_timeout[CT_DCCP_CLOSING],
709 .maxlen = sizeof(unsigned int),
710 .mode = 0644,
711 .proc_handler = proc_dointvec_jiffies,
712 },
713 {
714 .ctl_name = CTL_UNNUMBERED,
715 .procname = "nf_conntrack_dccp_timeout_timewait",
716 .data = &dccp_timeout[CT_DCCP_TIMEWAIT],
717 .maxlen = sizeof(unsigned int),
718 .mode = 0644,
719 .proc_handler = proc_dointvec_jiffies,
720 },
721 {
722 .ctl_name = CTL_UNNUMBERED,
723 .procname = "nf_conntrack_dccp_loose",
724 .data = &nf_ct_dccp_loose,
725 .maxlen = sizeof(nf_ct_dccp_loose),
726 .mode = 0644,
727 .proc_handler = proc_dointvec,
728 },
729 {
730 .ctl_name = 0,
731 }
732};
733#endif /* CONFIG_SYSCTL */
734
735static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
736 .l3proto = AF_INET,
737 .l4proto = IPPROTO_DCCP,
738 .name = "dccp",
739 .pkt_to_tuple = dccp_pkt_to_tuple,
740 .invert_tuple = dccp_invert_tuple,
741 .new = dccp_new,
742 .packet = dccp_packet,
743 .error = dccp_error,
744 .print_tuple = dccp_print_tuple,
745 .print_conntrack = dccp_print_conntrack,
746#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
747 .to_nlattr = dccp_to_nlattr,
748 .from_nlattr = nlattr_to_dccp,
749 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
750 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
751 .nla_policy = nf_ct_port_nla_policy,
752#endif
753#ifdef CONFIG_SYSCTL
754 .ctl_table_users = &dccp_sysctl_table_users,
755 .ctl_table_header = &dccp_sysctl_header,
756 .ctl_table = dccp_sysctl_table,
757#endif
758};
759
760static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
761 .l3proto = AF_INET6,
762 .l4proto = IPPROTO_DCCP,
763 .name = "dccp",
764 .pkt_to_tuple = dccp_pkt_to_tuple,
765 .invert_tuple = dccp_invert_tuple,
766 .new = dccp_new,
767 .packet = dccp_packet,
768 .error = dccp_error,
769 .print_tuple = dccp_print_tuple,
770 .print_conntrack = dccp_print_conntrack,
771#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
772 .to_nlattr = dccp_to_nlattr,
773 .from_nlattr = nlattr_to_dccp,
774 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
775 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
776 .nla_policy = nf_ct_port_nla_policy,
777#endif
778#ifdef CONFIG_SYSCTL
779 .ctl_table_users = &dccp_sysctl_table_users,
780 .ctl_table_header = &dccp_sysctl_header,
781 .ctl_table = dccp_sysctl_table,
782#endif
783};
784
785static int __init nf_conntrack_proto_dccp_init(void)
786{
787 int err;
788
789 err = nf_conntrack_l4proto_register(&dccp_proto4);
790 if (err < 0)
791 goto err1;
792
793 err = nf_conntrack_l4proto_register(&dccp_proto6);
794 if (err < 0)
795 goto err2;
796 return 0;
797
798err2:
799 nf_conntrack_l4proto_unregister(&dccp_proto4);
800err1:
801 return err;
802}
803
804static void __exit nf_conntrack_proto_dccp_fini(void)
805{
806 nf_conntrack_l4proto_unregister(&dccp_proto6);
807 nf_conntrack_l4proto_unregister(&dccp_proto4);
808}
809
810module_init(nf_conntrack_proto_dccp_init);
811module_exit(nf_conntrack_proto_dccp_fini);
812
813MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
814MODULE_DESCRIPTION("DCCP connection tracking protocol helper");
815MODULE_LICENSE("GPL");
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 55458915575f..e31b0e7bd0b1 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -14,23 +14,23 @@
14 14
15static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ; 15static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
16 16
17static int generic_pkt_to_tuple(const struct sk_buff *skb, 17static bool generic_pkt_to_tuple(const struct sk_buff *skb,
18 unsigned int dataoff, 18 unsigned int dataoff,
19 struct nf_conntrack_tuple *tuple) 19 struct nf_conntrack_tuple *tuple)
20{ 20{
21 tuple->src.u.all = 0; 21 tuple->src.u.all = 0;
22 tuple->dst.u.all = 0; 22 tuple->dst.u.all = 0;
23 23
24 return 1; 24 return true;
25} 25}
26 26
27static int generic_invert_tuple(struct nf_conntrack_tuple *tuple, 27static bool generic_invert_tuple(struct nf_conntrack_tuple *tuple,
28 const struct nf_conntrack_tuple *orig) 28 const struct nf_conntrack_tuple *orig)
29{ 29{
30 tuple->src.u.all = 0; 30 tuple->src.u.all = 0;
31 tuple->dst.u.all = 0; 31 tuple->dst.u.all = 0;
32 32
33 return 1; 33 return true;
34} 34}
35 35
36/* Print out the per-protocol part of the tuple. */ 36/* Print out the per-protocol part of the tuple. */
@@ -53,10 +53,10 @@ static int packet(struct nf_conn *ct,
53} 53}
54 54
55/* Called when a new connection for this protocol found. */ 55/* Called when a new connection for this protocol found. */
56static int new(struct nf_conn *ct, const struct sk_buff *skb, 56static bool new(struct nf_conn *ct, const struct sk_buff *skb,
57 unsigned int dataoff) 57 unsigned int dataoff)
58{ 58{
59 return 1; 59 return true;
60} 60}
61 61
62#ifdef CONFIG_SYSCTL 62#ifdef CONFIG_SYSCTL
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index e10024a1b666..654a4f7f12c6 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -82,7 +82,7 @@ static __be16 gre_keymap_lookup(struct nf_conntrack_tuple *t)
82 read_unlock_bh(&nf_ct_gre_lock); 82 read_unlock_bh(&nf_ct_gre_lock);
83 83
84 pr_debug("lookup src key 0x%x for ", key); 84 pr_debug("lookup src key 0x%x for ", key);
85 NF_CT_DUMP_TUPLE(t); 85 nf_ct_dump_tuple(t);
86 86
87 return key; 87 return key;
88} 88}
@@ -113,7 +113,7 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
113 *kmp = km; 113 *kmp = km;
114 114
115 pr_debug("adding new entry %p: ", km); 115 pr_debug("adding new entry %p: ", km);
116 NF_CT_DUMP_TUPLE(&km->tuple); 116 nf_ct_dump_tuple(&km->tuple);
117 117
118 write_lock_bh(&nf_ct_gre_lock); 118 write_lock_bh(&nf_ct_gre_lock);
119 list_add_tail(&km->list, &gre_keymap_list); 119 list_add_tail(&km->list, &gre_keymap_list);
@@ -148,18 +148,17 @@ EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_destroy);
148/* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */ 148/* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */
149 149
150/* invert gre part of tuple */ 150/* invert gre part of tuple */
151static int gre_invert_tuple(struct nf_conntrack_tuple *tuple, 151static bool gre_invert_tuple(struct nf_conntrack_tuple *tuple,
152 const struct nf_conntrack_tuple *orig) 152 const struct nf_conntrack_tuple *orig)
153{ 153{
154 tuple->dst.u.gre.key = orig->src.u.gre.key; 154 tuple->dst.u.gre.key = orig->src.u.gre.key;
155 tuple->src.u.gre.key = orig->dst.u.gre.key; 155 tuple->src.u.gre.key = orig->dst.u.gre.key;
156 return 1; 156 return true;
157} 157}
158 158
159/* gre hdr info to tuple */ 159/* gre hdr info to tuple */
160static int gre_pkt_to_tuple(const struct sk_buff *skb, 160static bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
161 unsigned int dataoff, 161 struct nf_conntrack_tuple *tuple)
162 struct nf_conntrack_tuple *tuple)
163{ 162{
164 const struct gre_hdr_pptp *pgrehdr; 163 const struct gre_hdr_pptp *pgrehdr;
165 struct gre_hdr_pptp _pgrehdr; 164 struct gre_hdr_pptp _pgrehdr;
@@ -173,24 +172,24 @@ static int gre_pkt_to_tuple(const struct sk_buff *skb,
173 /* try to behave like "nf_conntrack_proto_generic" */ 172 /* try to behave like "nf_conntrack_proto_generic" */
174 tuple->src.u.all = 0; 173 tuple->src.u.all = 0;
175 tuple->dst.u.all = 0; 174 tuple->dst.u.all = 0;
176 return 1; 175 return true;
177 } 176 }
178 177
179 /* PPTP header is variable length, only need up to the call_id field */ 178 /* PPTP header is variable length, only need up to the call_id field */
180 pgrehdr = skb_header_pointer(skb, dataoff, 8, &_pgrehdr); 179 pgrehdr = skb_header_pointer(skb, dataoff, 8, &_pgrehdr);
181 if (!pgrehdr) 180 if (!pgrehdr)
182 return 1; 181 return true;
183 182
184 if (ntohs(grehdr->protocol) != GRE_PROTOCOL_PPTP) { 183 if (ntohs(grehdr->protocol) != GRE_PROTOCOL_PPTP) {
185 pr_debug("GRE_VERSION_PPTP but unknown proto\n"); 184 pr_debug("GRE_VERSION_PPTP but unknown proto\n");
186 return 0; 185 return false;
187 } 186 }
188 187
189 tuple->dst.u.gre.key = pgrehdr->call_id; 188 tuple->dst.u.gre.key = pgrehdr->call_id;
190 srckey = gre_keymap_lookup(tuple); 189 srckey = gre_keymap_lookup(tuple);
191 tuple->src.u.gre.key = srckey; 190 tuple->src.u.gre.key = srckey;
192 191
193 return 1; 192 return true;
194} 193}
195 194
196/* print gre part of tuple */ 195/* print gre part of tuple */
@@ -235,18 +234,18 @@ static int gre_packet(struct nf_conn *ct,
235} 234}
236 235
237/* Called when a new connection for this protocol found. */ 236/* Called when a new connection for this protocol found. */
238static int gre_new(struct nf_conn *ct, const struct sk_buff *skb, 237static bool gre_new(struct nf_conn *ct, const struct sk_buff *skb,
239 unsigned int dataoff) 238 unsigned int dataoff)
240{ 239{
241 pr_debug(": "); 240 pr_debug(": ");
242 NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 241 nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
243 242
244 /* initialize to sane value. Ideally a conntrack helper 243 /* initialize to sane value. Ideally a conntrack helper
245 * (e.g. in case of pptp) is increasing them */ 244 * (e.g. in case of pptp) is increasing them */
246 ct->proto.gre.stream_timeout = GRE_STREAM_TIMEOUT; 245 ct->proto.gre.stream_timeout = GRE_STREAM_TIMEOUT;
247 ct->proto.gre.timeout = GRE_TIMEOUT; 246 ct->proto.gre.timeout = GRE_TIMEOUT;
248 247
249 return 1; 248 return true;
250} 249}
251 250
252/* Called when a conntrack entry has already been removed from the hashes 251/* Called when a conntrack entry has already been removed from the hashes
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index f9a08370dbb3..cbf2e27a22b2 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -33,7 +33,7 @@ static DEFINE_RWLOCK(sctp_lock);
33 33
34 And so for me for SCTP :D -Kiran */ 34 And so for me for SCTP :D -Kiran */
35 35
36static const char *sctp_conntrack_names[] = { 36static const char *const sctp_conntrack_names[] = {
37 "NONE", 37 "NONE",
38 "CLOSED", 38 "CLOSED",
39 "COOKIE_WAIT", 39 "COOKIE_WAIT",
@@ -130,28 +130,28 @@ static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {
130 } 130 }
131}; 131};
132 132
133static int sctp_pkt_to_tuple(const struct sk_buff *skb, 133static bool sctp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
134 unsigned int dataoff, 134 struct nf_conntrack_tuple *tuple)
135 struct nf_conntrack_tuple *tuple)
136{ 135{
137 sctp_sctphdr_t _hdr, *hp; 136 const struct sctphdr *hp;
137 struct sctphdr _hdr;
138 138
139 /* Actually only need first 8 bytes. */ 139 /* Actually only need first 8 bytes. */
140 hp = skb_header_pointer(skb, dataoff, 8, &_hdr); 140 hp = skb_header_pointer(skb, dataoff, 8, &_hdr);
141 if (hp == NULL) 141 if (hp == NULL)
142 return 0; 142 return false;
143 143
144 tuple->src.u.sctp.port = hp->source; 144 tuple->src.u.sctp.port = hp->source;
145 tuple->dst.u.sctp.port = hp->dest; 145 tuple->dst.u.sctp.port = hp->dest;
146 return 1; 146 return true;
147} 147}
148 148
149static int sctp_invert_tuple(struct nf_conntrack_tuple *tuple, 149static bool sctp_invert_tuple(struct nf_conntrack_tuple *tuple,
150 const struct nf_conntrack_tuple *orig) 150 const struct nf_conntrack_tuple *orig)
151{ 151{
152 tuple->src.u.sctp.port = orig->dst.u.sctp.port; 152 tuple->src.u.sctp.port = orig->dst.u.sctp.port;
153 tuple->dst.u.sctp.port = orig->src.u.sctp.port; 153 tuple->dst.u.sctp.port = orig->src.u.sctp.port;
154 return 1; 154 return true;
155} 155}
156 156
157/* Print out the per-protocol part of the tuple. */ 157/* Print out the per-protocol part of the tuple. */
@@ -292,8 +292,10 @@ static int sctp_packet(struct nf_conn *ct,
292{ 292{
293 enum sctp_conntrack new_state, old_state; 293 enum sctp_conntrack new_state, old_state;
294 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 294 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
295 sctp_sctphdr_t _sctph, *sh; 295 const struct sctphdr *sh;
296 sctp_chunkhdr_t _sch, *sch; 296 struct sctphdr _sctph;
297 const struct sctp_chunkhdr *sch;
298 struct sctp_chunkhdr _sch;
297 u_int32_t offset, count; 299 u_int32_t offset, count;
298 unsigned long map[256 / sizeof(unsigned long)] = { 0 }; 300 unsigned long map[256 / sizeof(unsigned long)] = { 0 };
299 301
@@ -390,27 +392,29 @@ out:
390} 392}
391 393
392/* Called when a new connection for this protocol found. */ 394/* Called when a new connection for this protocol found. */
393static int sctp_new(struct nf_conn *ct, const struct sk_buff *skb, 395static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
394 unsigned int dataoff) 396 unsigned int dataoff)
395{ 397{
396 enum sctp_conntrack new_state; 398 enum sctp_conntrack new_state;
397 sctp_sctphdr_t _sctph, *sh; 399 const struct sctphdr *sh;
398 sctp_chunkhdr_t _sch, *sch; 400 struct sctphdr _sctph;
401 const struct sctp_chunkhdr *sch;
402 struct sctp_chunkhdr _sch;
399 u_int32_t offset, count; 403 u_int32_t offset, count;
400 unsigned long map[256 / sizeof(unsigned long)] = { 0 }; 404 unsigned long map[256 / sizeof(unsigned long)] = { 0 };
401 405
402 sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); 406 sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
403 if (sh == NULL) 407 if (sh == NULL)
404 return 0; 408 return false;
405 409
406 if (do_basic_checks(ct, skb, dataoff, map) != 0) 410 if (do_basic_checks(ct, skb, dataoff, map) != 0)
407 return 0; 411 return false;
408 412
409 /* If an OOTB packet has any of these chunks discard (Sec 8.4) */ 413 /* If an OOTB packet has any of these chunks discard (Sec 8.4) */
410 if (test_bit(SCTP_CID_ABORT, map) || 414 if (test_bit(SCTP_CID_ABORT, map) ||
411 test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) || 415 test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) ||
412 test_bit(SCTP_CID_COOKIE_ACK, map)) 416 test_bit(SCTP_CID_COOKIE_ACK, map))
413 return 0; 417 return false;
414 418
415 new_state = SCTP_CONNTRACK_MAX; 419 new_state = SCTP_CONNTRACK_MAX;
416 for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) { 420 for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
@@ -422,7 +426,7 @@ static int sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
422 if (new_state == SCTP_CONNTRACK_NONE || 426 if (new_state == SCTP_CONNTRACK_NONE ||
423 new_state == SCTP_CONNTRACK_MAX) { 427 new_state == SCTP_CONNTRACK_MAX) {
424 pr_debug("nf_conntrack_sctp: invalid new deleting.\n"); 428 pr_debug("nf_conntrack_sctp: invalid new deleting.\n");
425 return 0; 429 return false;
426 } 430 }
427 431
428 /* Copy the vtag into the state info */ 432 /* Copy the vtag into the state info */
@@ -433,7 +437,7 @@ static int sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
433 ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t), 437 ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t),
434 sizeof(_inithdr), &_inithdr); 438 sizeof(_inithdr), &_inithdr);
435 if (ih == NULL) 439 if (ih == NULL)
436 return 0; 440 return false;
437 441
438 pr_debug("Setting vtag %x for new conn\n", 442 pr_debug("Setting vtag %x for new conn\n",
439 ih->init_tag); 443 ih->init_tag);
@@ -442,7 +446,7 @@ static int sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
442 ih->init_tag; 446 ih->init_tag;
443 } else { 447 } else {
444 /* Sec 8.5.1 (A) */ 448 /* Sec 8.5.1 (A) */
445 return 0; 449 return false;
446 } 450 }
447 } 451 }
448 /* If it is a shutdown ack OOTB packet, we expect a return 452 /* If it is a shutdown ack OOTB packet, we expect a return
@@ -456,7 +460,7 @@ static int sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
456 ct->proto.sctp.state = new_state; 460 ct->proto.sctp.state = new_state;
457 } 461 }
458 462
459 return 1; 463 return true;
460} 464}
461 465
462#ifdef CONFIG_SYSCTL 466#ifdef CONFIG_SYSCTL
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 62567959b66e..ba94004fe323 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -257,9 +257,8 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
257 } 257 }
258}; 258};
259 259
260static int tcp_pkt_to_tuple(const struct sk_buff *skb, 260static bool tcp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
261 unsigned int dataoff, 261 struct nf_conntrack_tuple *tuple)
262 struct nf_conntrack_tuple *tuple)
263{ 262{
264 const struct tcphdr *hp; 263 const struct tcphdr *hp;
265 struct tcphdr _hdr; 264 struct tcphdr _hdr;
@@ -267,20 +266,20 @@ static int tcp_pkt_to_tuple(const struct sk_buff *skb,
267 /* Actually only need first 8 bytes. */ 266 /* Actually only need first 8 bytes. */
268 hp = skb_header_pointer(skb, dataoff, 8, &_hdr); 267 hp = skb_header_pointer(skb, dataoff, 8, &_hdr);
269 if (hp == NULL) 268 if (hp == NULL)
270 return 0; 269 return false;
271 270
272 tuple->src.u.tcp.port = hp->source; 271 tuple->src.u.tcp.port = hp->source;
273 tuple->dst.u.tcp.port = hp->dest; 272 tuple->dst.u.tcp.port = hp->dest;
274 273
275 return 1; 274 return true;
276} 275}
277 276
278static int tcp_invert_tuple(struct nf_conntrack_tuple *tuple, 277static bool tcp_invert_tuple(struct nf_conntrack_tuple *tuple,
279 const struct nf_conntrack_tuple *orig) 278 const struct nf_conntrack_tuple *orig)
280{ 279{
281 tuple->src.u.tcp.port = orig->dst.u.tcp.port; 280 tuple->src.u.tcp.port = orig->dst.u.tcp.port;
282 tuple->dst.u.tcp.port = orig->src.u.tcp.port; 281 tuple->dst.u.tcp.port = orig->src.u.tcp.port;
283 return 1; 282 return true;
284} 283}
285 284
286/* Print out the per-protocol part of the tuple. */ 285/* Print out the per-protocol part of the tuple. */
@@ -478,20 +477,20 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
478 } 477 }
479} 478}
480 479
481static int tcp_in_window(const struct nf_conn *ct, 480static bool tcp_in_window(const struct nf_conn *ct,
482 struct ip_ct_tcp *state, 481 struct ip_ct_tcp *state,
483 enum ip_conntrack_dir dir, 482 enum ip_conntrack_dir dir,
484 unsigned int index, 483 unsigned int index,
485 const struct sk_buff *skb, 484 const struct sk_buff *skb,
486 unsigned int dataoff, 485 unsigned int dataoff,
487 const struct tcphdr *tcph, 486 const struct tcphdr *tcph,
488 int pf) 487 int pf)
489{ 488{
490 struct ip_ct_tcp_state *sender = &state->seen[dir]; 489 struct ip_ct_tcp_state *sender = &state->seen[dir];
491 struct ip_ct_tcp_state *receiver = &state->seen[!dir]; 490 struct ip_ct_tcp_state *receiver = &state->seen[!dir];
492 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; 491 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
493 __u32 seq, ack, sack, end, win, swin; 492 __u32 seq, ack, sack, end, win, swin;
494 int res; 493 bool res;
495 494
496 /* 495 /*
497 * Get the required data from the packet. 496 * Get the required data from the packet.
@@ -506,7 +505,7 @@ static int tcp_in_window(const struct nf_conn *ct,
506 505
507 pr_debug("tcp_in_window: START\n"); 506 pr_debug("tcp_in_window: START\n");
508 pr_debug("tcp_in_window: "); 507 pr_debug("tcp_in_window: ");
509 NF_CT_DUMP_TUPLE(tuple); 508 nf_ct_dump_tuple(tuple);
510 pr_debug("seq=%u ack=%u sack=%u win=%u end=%u\n", 509 pr_debug("seq=%u ack=%u sack=%u win=%u end=%u\n",
511 seq, ack, sack, win, end); 510 seq, ack, sack, win, end);
512 pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " 511 pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
@@ -593,7 +592,7 @@ static int tcp_in_window(const struct nf_conn *ct,
593 seq = end = sender->td_end; 592 seq = end = sender->td_end;
594 593
595 pr_debug("tcp_in_window: "); 594 pr_debug("tcp_in_window: ");
596 NF_CT_DUMP_TUPLE(tuple); 595 nf_ct_dump_tuple(tuple);
597 pr_debug("seq=%u ack=%u sack =%u win=%u end=%u\n", 596 pr_debug("seq=%u ack=%u sack =%u win=%u end=%u\n",
598 seq, ack, sack, win, end); 597 seq, ack, sack, win, end);
599 pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " 598 pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
@@ -657,12 +656,12 @@ static int tcp_in_window(const struct nf_conn *ct,
657 state->retrans = 0; 656 state->retrans = 0;
658 } 657 }
659 } 658 }
660 res = 1; 659 res = true;
661 } else { 660 } else {
662 res = 0; 661 res = false;
663 if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL || 662 if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
664 nf_ct_tcp_be_liberal) 663 nf_ct_tcp_be_liberal)
665 res = 1; 664 res = true;
666 if (!res && LOG_INVALID(IPPROTO_TCP)) 665 if (!res && LOG_INVALID(IPPROTO_TCP))
667 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 666 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
668 "nf_ct_tcp: %s ", 667 "nf_ct_tcp: %s ",
@@ -676,7 +675,7 @@ static int tcp_in_window(const struct nf_conn *ct,
676 : "SEQ is over the upper bound (over the window of the receiver)"); 675 : "SEQ is over the upper bound (over the window of the receiver)");
677 } 676 }
678 677
679 pr_debug("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u " 678 pr_debug("tcp_in_window: res=%u sender end=%u maxend=%u maxwin=%u "
680 "receiver end=%u maxend=%u maxwin=%u\n", 679 "receiver end=%u maxend=%u maxwin=%u\n",
681 res, sender->td_end, sender->td_maxend, sender->td_maxwin, 680 res, sender->td_end, sender->td_maxend, sender->td_maxwin,
682 receiver->td_end, receiver->td_maxend, receiver->td_maxwin); 681 receiver->td_end, receiver->td_maxend, receiver->td_maxwin);
@@ -937,7 +936,7 @@ static int tcp_packet(struct nf_conn *ct,
937 ct->proto.tcp.last_dir = dir; 936 ct->proto.tcp.last_dir = dir;
938 937
939 pr_debug("tcp_conntracks: "); 938 pr_debug("tcp_conntracks: ");
940 NF_CT_DUMP_TUPLE(tuple); 939 nf_ct_dump_tuple(tuple);
941 pr_debug("syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n", 940 pr_debug("syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n",
942 (th->syn ? 1 : 0), (th->ack ? 1 : 0), 941 (th->syn ? 1 : 0), (th->ack ? 1 : 0),
943 (th->fin ? 1 : 0), (th->rst ? 1 : 0), 942 (th->fin ? 1 : 0), (th->rst ? 1 : 0),
@@ -982,9 +981,8 @@ static int tcp_packet(struct nf_conn *ct,
982} 981}
983 982
984/* Called when a new connection for this protocol found. */ 983/* Called when a new connection for this protocol found. */
985static int tcp_new(struct nf_conn *ct, 984static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
986 const struct sk_buff *skb, 985 unsigned int dataoff)
987 unsigned int dataoff)
988{ 986{
989 enum tcp_conntrack new_state; 987 enum tcp_conntrack new_state;
990 const struct tcphdr *th; 988 const struct tcphdr *th;
@@ -1003,7 +1001,7 @@ static int tcp_new(struct nf_conn *ct,
1003 /* Invalid: delete conntrack */ 1001 /* Invalid: delete conntrack */
1004 if (new_state >= TCP_CONNTRACK_MAX) { 1002 if (new_state >= TCP_CONNTRACK_MAX) {
1005 pr_debug("nf_ct_tcp: invalid new deleting.\n"); 1003 pr_debug("nf_ct_tcp: invalid new deleting.\n");
1006 return 0; 1004 return false;
1007 } 1005 }
1008 1006
1009 if (new_state == TCP_CONNTRACK_SYN_SENT) { 1007 if (new_state == TCP_CONNTRACK_SYN_SENT) {
@@ -1021,7 +1019,7 @@ static int tcp_new(struct nf_conn *ct,
1021 ct->proto.tcp.seen[1].flags = 0; 1019 ct->proto.tcp.seen[1].flags = 0;
1022 } else if (nf_ct_tcp_loose == 0) { 1020 } else if (nf_ct_tcp_loose == 0) {
1023 /* Don't try to pick up connections. */ 1021 /* Don't try to pick up connections. */
1024 return 0; 1022 return false;
1025 } else { 1023 } else {
1026 /* 1024 /*
1027 * We are in the middle of a connection, 1025 * We are in the middle of a connection,
@@ -1061,7 +1059,7 @@ static int tcp_new(struct nf_conn *ct,
1061 sender->td_scale, 1059 sender->td_scale,
1062 receiver->td_end, receiver->td_maxend, receiver->td_maxwin, 1060 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
1063 receiver->td_scale); 1061 receiver->td_scale);
1064 return 1; 1062 return true;
1065} 1063}
1066 1064
1067#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 1065#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
@@ -1129,11 +1127,13 @@ static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
1129 if (err < 0) 1127 if (err < 0)
1130 return err; 1128 return err;
1131 1129
1132 if (!tb[CTA_PROTOINFO_TCP_STATE]) 1130 if (tb[CTA_PROTOINFO_TCP_STATE] &&
1131 nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX)
1133 return -EINVAL; 1132 return -EINVAL;
1134 1133
1135 write_lock_bh(&tcp_lock); 1134 write_lock_bh(&tcp_lock);
1136 ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]); 1135 if (tb[CTA_PROTOINFO_TCP_STATE])
1136 ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]);
1137 1137
1138 if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) { 1138 if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) {
1139 struct nf_ct_tcp_flags *attr = 1139 struct nf_ct_tcp_flags *attr =
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index b8a35cc06416..8b21762e65de 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -26,7 +26,7 @@
26static unsigned int nf_ct_udp_timeout __read_mostly = 30*HZ; 26static unsigned int nf_ct_udp_timeout __read_mostly = 30*HZ;
27static unsigned int nf_ct_udp_timeout_stream __read_mostly = 180*HZ; 27static unsigned int nf_ct_udp_timeout_stream __read_mostly = 180*HZ;
28 28
29static int udp_pkt_to_tuple(const struct sk_buff *skb, 29static bool udp_pkt_to_tuple(const struct sk_buff *skb,
30 unsigned int dataoff, 30 unsigned int dataoff,
31 struct nf_conntrack_tuple *tuple) 31 struct nf_conntrack_tuple *tuple)
32{ 32{
@@ -36,20 +36,20 @@ static int udp_pkt_to_tuple(const struct sk_buff *skb,
36 /* Actually only need first 8 bytes. */ 36 /* Actually only need first 8 bytes. */
37 hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); 37 hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
38 if (hp == NULL) 38 if (hp == NULL)
39 return 0; 39 return false;
40 40
41 tuple->src.u.udp.port = hp->source; 41 tuple->src.u.udp.port = hp->source;
42 tuple->dst.u.udp.port = hp->dest; 42 tuple->dst.u.udp.port = hp->dest;
43 43
44 return 1; 44 return true;
45} 45}
46 46
47static int udp_invert_tuple(struct nf_conntrack_tuple *tuple, 47static bool udp_invert_tuple(struct nf_conntrack_tuple *tuple,
48 const struct nf_conntrack_tuple *orig) 48 const struct nf_conntrack_tuple *orig)
49{ 49{
50 tuple->src.u.udp.port = orig->dst.u.udp.port; 50 tuple->src.u.udp.port = orig->dst.u.udp.port;
51 tuple->dst.u.udp.port = orig->src.u.udp.port; 51 tuple->dst.u.udp.port = orig->src.u.udp.port;
52 return 1; 52 return true;
53} 53}
54 54
55/* Print out the per-protocol part of the tuple. */ 55/* Print out the per-protocol part of the tuple. */
@@ -83,10 +83,10 @@ static int udp_packet(struct nf_conn *ct,
83} 83}
84 84
85/* Called when a new connection for this protocol found. */ 85/* Called when a new connection for this protocol found. */
86static int udp_new(struct nf_conn *ct, const struct sk_buff *skb, 86static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb,
87 unsigned int dataoff) 87 unsigned int dataoff)
88{ 88{
89 return 1; 89 return true;
90} 90}
91 91
92static int udp_error(struct sk_buff *skb, unsigned int dataoff, 92static int udp_error(struct sk_buff *skb, unsigned int dataoff,
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 9dd03c7aeac6..1fa62f3c24f1 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -27,28 +27,28 @@
27static unsigned int nf_ct_udplite_timeout __read_mostly = 30*HZ; 27static unsigned int nf_ct_udplite_timeout __read_mostly = 30*HZ;
28static unsigned int nf_ct_udplite_timeout_stream __read_mostly = 180*HZ; 28static unsigned int nf_ct_udplite_timeout_stream __read_mostly = 180*HZ;
29 29
30static int udplite_pkt_to_tuple(const struct sk_buff *skb, 30static bool udplite_pkt_to_tuple(const struct sk_buff *skb,
31 unsigned int dataoff, 31 unsigned int dataoff,
32 struct nf_conntrack_tuple *tuple) 32 struct nf_conntrack_tuple *tuple)
33{ 33{
34 const struct udphdr *hp; 34 const struct udphdr *hp;
35 struct udphdr _hdr; 35 struct udphdr _hdr;
36 36
37 hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); 37 hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
38 if (hp == NULL) 38 if (hp == NULL)
39 return 0; 39 return false;
40 40
41 tuple->src.u.udp.port = hp->source; 41 tuple->src.u.udp.port = hp->source;
42 tuple->dst.u.udp.port = hp->dest; 42 tuple->dst.u.udp.port = hp->dest;
43 return 1; 43 return true;
44} 44}
45 45
46static int udplite_invert_tuple(struct nf_conntrack_tuple *tuple, 46static bool udplite_invert_tuple(struct nf_conntrack_tuple *tuple,
47 const struct nf_conntrack_tuple *orig) 47 const struct nf_conntrack_tuple *orig)
48{ 48{
49 tuple->src.u.udp.port = orig->dst.u.udp.port; 49 tuple->src.u.udp.port = orig->dst.u.udp.port;
50 tuple->dst.u.udp.port = orig->src.u.udp.port; 50 tuple->dst.u.udp.port = orig->src.u.udp.port;
51 return 1; 51 return true;
52} 52}
53 53
54/* Print out the per-protocol part of the tuple. */ 54/* Print out the per-protocol part of the tuple. */
@@ -83,10 +83,10 @@ static int udplite_packet(struct nf_conn *ct,
83} 83}
84 84
85/* Called when a new connection for this protocol found. */ 85/* Called when a new connection for this protocol found. */
86static int udplite_new(struct nf_conn *ct, const struct sk_buff *skb, 86static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb,
87 unsigned int dataoff) 87 unsigned int dataoff)
88{ 88{
89 return 1; 89 return true;
90} 90}
91 91
92static int udplite_error(struct sk_buff *skb, unsigned int dataoff, 92static int udplite_error(struct sk_buff *skb, unsigned int dataoff,
@@ -127,32 +127,13 @@ static int udplite_error(struct sk_buff *skb, unsigned int dataoff,
127 } 127 }
128 128
129 /* Checksum invalid? Ignore. */ 129 /* Checksum invalid? Ignore. */
130 if (nf_conntrack_checksum && !skb_csum_unnecessary(skb) && 130 if (nf_conntrack_checksum && hooknum == NF_INET_PRE_ROUTING &&
131 hooknum == NF_INET_PRE_ROUTING) { 131 nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_UDP,
132 if (pf == PF_INET) { 132 pf)) {
133 struct iphdr *iph = ip_hdr(skb); 133 if (LOG_INVALID(IPPROTO_UDPLITE))
134 134 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
135 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, 135 "nf_ct_udplite: bad UDPLite checksum ");
136 udplen, IPPROTO_UDPLITE, 0); 136 return -NF_ACCEPT;
137 } else {
138 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
139 __wsum hsum = skb_checksum(skb, 0, dataoff, 0);
140
141 skb->csum = ~csum_unfold(
142 csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
143 udplen, IPPROTO_UDPLITE,
144 csum_sub(0, hsum)));
145 }
146
147 skb->ip_summed = CHECKSUM_NONE;
148 if (__skb_checksum_complete_head(skb, dataoff + cscov)) {
149 if (LOG_INVALID(IPPROTO_UDPLITE))
150 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
151 "nf_ct_udplite: bad UDPLite "
152 "checksum ");
153 return -NF_ACCEPT;
154 }
155 skb->ip_summed = CHECKSUM_UNNECESSARY;
156 } 137 }
157 138
158 return NF_ACCEPT; 139 return NF_ACCEPT;
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index a70051d741a7..a94294b2b23c 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -72,7 +72,6 @@ static int help(struct sk_buff *skb,
72 struct nf_conntrack_tuple *tuple; 72 struct nf_conntrack_tuple *tuple;
73 struct sane_request *req; 73 struct sane_request *req;
74 struct sane_reply_net_start *reply; 74 struct sane_reply_net_start *reply;
75 int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
76 75
77 ct_sane_info = &nfct_help(ct)->help.ct_sane_info; 76 ct_sane_info = &nfct_help(ct)->help.ct_sane_info;
78 /* Until there's been traffic both ways, don't look in packets. */ 77 /* Until there's been traffic both ways, don't look in packets. */
@@ -143,11 +142,12 @@ static int help(struct sk_buff *skb,
143 } 142 }
144 143
145 tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; 144 tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
146 nf_ct_expect_init(exp, family, &tuple->src.u3, &tuple->dst.u3, 145 nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
146 &tuple->src.u3, &tuple->dst.u3,
147 IPPROTO_TCP, NULL, &reply->port); 147 IPPROTO_TCP, NULL, &reply->port);
148 148
149 pr_debug("nf_ct_sane: expect: "); 149 pr_debug("nf_ct_sane: expect: ");
150 NF_CT_DUMP_TUPLE(&exp->tuple); 150 nf_ct_dump_tuple(&exp->tuple);
151 151
152 /* Can't expect this? Best to drop packet now. */ 152 /* Can't expect this? Best to drop packet now. */
153 if (nf_ct_expect_related(exp) != 0) 153 if (nf_ct_expect_related(exp) != 0)
@@ -163,6 +163,11 @@ out:
163static struct nf_conntrack_helper sane[MAX_PORTS][2] __read_mostly; 163static struct nf_conntrack_helper sane[MAX_PORTS][2] __read_mostly;
164static char sane_names[MAX_PORTS][2][sizeof("sane-65535")] __read_mostly; 164static char sane_names[MAX_PORTS][2][sizeof("sane-65535")] __read_mostly;
165 165
166static const struct nf_conntrack_expect_policy sane_exp_policy = {
167 .max_expected = 1,
168 .timeout = 5 * 60,
169};
170
166/* don't make this __exit, since it's called from __init ! */ 171/* don't make this __exit, since it's called from __init ! */
167static void nf_conntrack_sane_fini(void) 172static void nf_conntrack_sane_fini(void)
168{ 173{
@@ -200,8 +205,7 @@ static int __init nf_conntrack_sane_init(void)
200 for (j = 0; j < 2; j++) { 205 for (j = 0; j < 2; j++) {
201 sane[i][j].tuple.src.u.tcp.port = htons(ports[i]); 206 sane[i][j].tuple.src.u.tcp.port = htons(ports[i]);
202 sane[i][j].tuple.dst.protonum = IPPROTO_TCP; 207 sane[i][j].tuple.dst.protonum = IPPROTO_TCP;
203 sane[i][j].max_expected = 1; 208 sane[i][j].expect_policy = &sane_exp_policy;
204 sane[i][j].timeout = 5 * 60; /* 5 Minutes */
205 sane[i][j].me = THIS_MODULE; 209 sane[i][j].me = THIS_MODULE;
206 sane[i][j].help = help; 210 sane[i][j].help = help;
207 tmpname = &sane_names[i][j][0]; 211 tmpname = &sane_names[i][j][0];
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index c521c891d351..65b3ba57a3b7 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -2,6 +2,8 @@
2 * 2 *
3 * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar> 3 * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar>
4 * based on RR's ip_conntrack_ftp.c and other modules. 4 * based on RR's ip_conntrack_ftp.c and other modules.
5 * (C) 2007 United Security Providers
6 * (C) 2007, 2008 Patrick McHardy <kaber@trash.net>
5 * 7 *
6 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -17,6 +19,7 @@
17#include <linux/netfilter.h> 19#include <linux/netfilter.h>
18 20
19#include <net/netfilter/nf_conntrack.h> 21#include <net/netfilter/nf_conntrack.h>
22#include <net/netfilter/nf_conntrack_core.h>
20#include <net/netfilter/nf_conntrack_expect.h> 23#include <net/netfilter/nf_conntrack_expect.h>
21#include <net/netfilter/nf_conntrack_helper.h> 24#include <net/netfilter/nf_conntrack_helper.h>
22#include <linux/netfilter/nf_conntrack_sip.h> 25#include <linux/netfilter/nf_conntrack_sip.h>
@@ -36,214 +39,102 @@ static unsigned int sip_timeout __read_mostly = SIP_TIMEOUT;
36module_param(sip_timeout, uint, 0600); 39module_param(sip_timeout, uint, 0600);
37MODULE_PARM_DESC(sip_timeout, "timeout for the master SIP session"); 40MODULE_PARM_DESC(sip_timeout, "timeout for the master SIP session");
38 41
42static int sip_direct_signalling __read_mostly = 1;
43module_param(sip_direct_signalling, int, 0600);
44MODULE_PARM_DESC(sip_direct_signalling, "expect incoming calls from registrar "
45 "only (default 1)");
46
47static int sip_direct_media __read_mostly = 1;
48module_param(sip_direct_media, int, 0600);
49MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
50 "endpoints only (default 1)");
51
39unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, 52unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb,
40 enum ip_conntrack_info ctinfo, 53 const char **dptr,
41 struct nf_conn *ct, 54 unsigned int *datalen) __read_mostly;
42 const char **dptr) __read_mostly;
43EXPORT_SYMBOL_GPL(nf_nat_sip_hook); 55EXPORT_SYMBOL_GPL(nf_nat_sip_hook);
44 56
45unsigned int (*nf_nat_sdp_hook)(struct sk_buff *skb, 57unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
46 enum ip_conntrack_info ctinfo, 58 const char **dptr,
47 struct nf_conntrack_expect *exp, 59 unsigned int *datalen,
48 const char *dptr) __read_mostly; 60 struct nf_conntrack_expect *exp,
49EXPORT_SYMBOL_GPL(nf_nat_sdp_hook); 61 unsigned int matchoff,
50 62 unsigned int matchlen) __read_mostly;
51static int digits_len(const struct nf_conn *, const char *, const char *, int *); 63EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook);
52static int epaddr_len(const struct nf_conn *, const char *, const char *, int *);
53static int skp_digits_len(const struct nf_conn *, const char *, const char *, int *);
54static int skp_epaddr_len(const struct nf_conn *, const char *, const char *, int *);
55
56struct sip_header_nfo {
57 const char *lname;
58 const char *sname;
59 const char *ln_str;
60 size_t lnlen;
61 size_t snlen;
62 size_t ln_strlen;
63 int case_sensitive;
64 int (*match_len)(const struct nf_conn *, const char *,
65 const char *, int *);
66};
67 64
68static const struct sip_header_nfo ct_sip_hdrs[] = { 65unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb,
69 [POS_REG_REQ_URI] = { /* SIP REGISTER request URI */ 66 const char **dptr,
70 .lname = "sip:", 67 unsigned int dataoff,
71 .lnlen = sizeof("sip:") - 1, 68 unsigned int *datalen,
72 .ln_str = ":", 69 enum sdp_header_types type,
73 .ln_strlen = sizeof(":") - 1, 70 enum sdp_header_types term,
74 .match_len = epaddr_len, 71 const union nf_inet_addr *addr)
75 }, 72 __read_mostly;
76 [POS_REQ_URI] = { /* SIP request URI */ 73EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook);
77 .lname = "sip:",
78 .lnlen = sizeof("sip:") - 1,
79 .ln_str = "@",
80 .ln_strlen = sizeof("@") - 1,
81 .match_len = epaddr_len,
82 },
83 [POS_FROM] = { /* SIP From header */
84 .lname = "From:",
85 .lnlen = sizeof("From:") - 1,
86 .sname = "\r\nf:",
87 .snlen = sizeof("\r\nf:") - 1,
88 .ln_str = "sip:",
89 .ln_strlen = sizeof("sip:") - 1,
90 .match_len = skp_epaddr_len,
91 },
92 [POS_TO] = { /* SIP To header */
93 .lname = "To:",
94 .lnlen = sizeof("To:") - 1,
95 .sname = "\r\nt:",
96 .snlen = sizeof("\r\nt:") - 1,
97 .ln_str = "sip:",
98 .ln_strlen = sizeof("sip:") - 1,
99 .match_len = skp_epaddr_len
100 },
101 [POS_VIA] = { /* SIP Via header */
102 .lname = "Via:",
103 .lnlen = sizeof("Via:") - 1,
104 .sname = "\r\nv:",
105 .snlen = sizeof("\r\nv:") - 1, /* rfc3261 "\r\n" */
106 .ln_str = "UDP ",
107 .ln_strlen = sizeof("UDP ") - 1,
108 .match_len = epaddr_len,
109 },
110 [POS_CONTACT] = { /* SIP Contact header */
111 .lname = "Contact:",
112 .lnlen = sizeof("Contact:") - 1,
113 .sname = "\r\nm:",
114 .snlen = sizeof("\r\nm:") - 1,
115 .ln_str = "sip:",
116 .ln_strlen = sizeof("sip:") - 1,
117 .match_len = skp_epaddr_len
118 },
119 [POS_CONTENT] = { /* SIP Content length header */
120 .lname = "Content-Length:",
121 .lnlen = sizeof("Content-Length:") - 1,
122 .sname = "\r\nl:",
123 .snlen = sizeof("\r\nl:") - 1,
124 .ln_str = ":",
125 .ln_strlen = sizeof(":") - 1,
126 .match_len = skp_digits_len
127 },
128 [POS_MEDIA] = { /* SDP media info */
129 .case_sensitive = 1,
130 .lname = "\nm=",
131 .lnlen = sizeof("\nm=") - 1,
132 .sname = "\rm=",
133 .snlen = sizeof("\rm=") - 1,
134 .ln_str = "audio ",
135 .ln_strlen = sizeof("audio ") - 1,
136 .match_len = digits_len
137 },
138 [POS_OWNER_IP4] = { /* SDP owner address*/
139 .case_sensitive = 1,
140 .lname = "\no=",
141 .lnlen = sizeof("\no=") - 1,
142 .sname = "\ro=",
143 .snlen = sizeof("\ro=") - 1,
144 .ln_str = "IN IP4 ",
145 .ln_strlen = sizeof("IN IP4 ") - 1,
146 .match_len = epaddr_len
147 },
148 [POS_CONNECTION_IP4] = {/* SDP connection info */
149 .case_sensitive = 1,
150 .lname = "\nc=",
151 .lnlen = sizeof("\nc=") - 1,
152 .sname = "\rc=",
153 .snlen = sizeof("\rc=") - 1,
154 .ln_str = "IN IP4 ",
155 .ln_strlen = sizeof("IN IP4 ") - 1,
156 .match_len = epaddr_len
157 },
158 [POS_OWNER_IP6] = { /* SDP owner address*/
159 .case_sensitive = 1,
160 .lname = "\no=",
161 .lnlen = sizeof("\no=") - 1,
162 .sname = "\ro=",
163 .snlen = sizeof("\ro=") - 1,
164 .ln_str = "IN IP6 ",
165 .ln_strlen = sizeof("IN IP6 ") - 1,
166 .match_len = epaddr_len
167 },
168 [POS_CONNECTION_IP6] = {/* SDP connection info */
169 .case_sensitive = 1,
170 .lname = "\nc=",
171 .lnlen = sizeof("\nc=") - 1,
172 .sname = "\rc=",
173 .snlen = sizeof("\rc=") - 1,
174 .ln_str = "IN IP6 ",
175 .ln_strlen = sizeof("IN IP6 ") - 1,
176 .match_len = epaddr_len
177 },
178 [POS_SDP_HEADER] = { /* SDP version header */
179 .case_sensitive = 1,
180 .lname = "\nv=",
181 .lnlen = sizeof("\nv=") - 1,
182 .sname = "\rv=",
183 .snlen = sizeof("\rv=") - 1,
184 .ln_str = "=",
185 .ln_strlen = sizeof("=") - 1,
186 .match_len = digits_len
187 }
188};
189 74
190/* get line length until first CR or LF seen. */ 75unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
191int ct_sip_lnlen(const char *line, const char *limit) 76 const char **dptr,
192{ 77 unsigned int *datalen,
193 const char *k = line; 78 unsigned int matchoff,
79 unsigned int matchlen,
80 u_int16_t port) __read_mostly;
81EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook);
194 82
195 while ((line <= limit) && (*line == '\r' || *line == '\n')) 83unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
196 line++; 84 const char **dptr,
85 unsigned int dataoff,
86 unsigned int *datalen,
87 const union nf_inet_addr *addr)
88 __read_mostly;
89EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook);
197 90
198 while (line <= limit) { 91unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb,
199 if (*line == '\r' || *line == '\n') 92 const char **dptr,
200 break; 93 unsigned int *datalen,
201 line++; 94 struct nf_conntrack_expect *rtp_exp,
202 } 95 struct nf_conntrack_expect *rtcp_exp,
203 return line - k; 96 unsigned int mediaoff,
204} 97 unsigned int medialen,
205EXPORT_SYMBOL_GPL(ct_sip_lnlen); 98 union nf_inet_addr *rtp_addr)
99 __read_mostly;
100EXPORT_SYMBOL_GPL(nf_nat_sdp_media_hook);
206 101
207/* Linear string search, case sensitive. */ 102static int string_len(const struct nf_conn *ct, const char *dptr,
208const char *ct_sip_search(const char *needle, const char *haystack, 103 const char *limit, int *shift)
209 size_t needle_len, size_t haystack_len,
210 int case_sensitive)
211{ 104{
212 const char *limit = haystack + (haystack_len - needle_len); 105 int len = 0;
213 106
214 while (haystack <= limit) { 107 while (dptr < limit && isalpha(*dptr)) {
215 if (case_sensitive) { 108 dptr++;
216 if (strncmp(haystack, needle, needle_len) == 0) 109 len++;
217 return haystack;
218 } else {
219 if (strnicmp(haystack, needle, needle_len) == 0)
220 return haystack;
221 }
222 haystack++;
223 } 110 }
224 return NULL; 111 return len;
225} 112}
226EXPORT_SYMBOL_GPL(ct_sip_search);
227 113
228static int digits_len(const struct nf_conn *ct, const char *dptr, 114static int digits_len(const struct nf_conn *ct, const char *dptr,
229 const char *limit, int *shift) 115 const char *limit, int *shift)
230{ 116{
231 int len = 0; 117 int len = 0;
232 while (dptr <= limit && isdigit(*dptr)) { 118 while (dptr < limit && isdigit(*dptr)) {
233 dptr++; 119 dptr++;
234 len++; 120 len++;
235 } 121 }
236 return len; 122 return len;
237} 123}
238 124
239/* get digits length, skipping blank spaces. */ 125/* get media type + port length */
240static int skp_digits_len(const struct nf_conn *ct, const char *dptr, 126static int media_len(const struct nf_conn *ct, const char *dptr,
241 const char *limit, int *shift) 127 const char *limit, int *shift)
242{ 128{
243 for (; dptr <= limit && *dptr == ' '; dptr++) 129 int len = string_len(ct, dptr, limit, shift);
244 (*shift)++; 130
131 dptr += len;
132 if (dptr >= limit || *dptr != ' ')
133 return 0;
134 len++;
135 dptr++;
245 136
246 return digits_len(ct, dptr, limit, shift); 137 return len + digits_len(ct, dptr, limit, shift);
247} 138}
248 139
249static int parse_addr(const struct nf_conn *ct, const char *cp, 140static int parse_addr(const struct nf_conn *ct, const char *cp,
@@ -251,10 +142,10 @@ static int parse_addr(const struct nf_conn *ct, const char *cp,
251 const char *limit) 142 const char *limit)
252{ 143{
253 const char *end; 144 const char *end;
254 int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
255 int ret = 0; 145 int ret = 0;
256 146
257 switch (family) { 147 memset(addr, 0, sizeof(*addr));
148 switch (nf_ct_l3num(ct)) {
258 case AF_INET: 149 case AF_INET:
259 ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end); 150 ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end);
260 break; 151 break;
@@ -302,13 +193,13 @@ static int skp_epaddr_len(const struct nf_conn *ct, const char *dptr,
302 /* Search for @, but stop at the end of the line. 193 /* Search for @, but stop at the end of the line.
303 * We are inside a sip: URI, so we don't need to worry about 194 * We are inside a sip: URI, so we don't need to worry about
304 * continuation lines. */ 195 * continuation lines. */
305 while (dptr <= limit && 196 while (dptr < limit &&
306 *dptr != '@' && *dptr != '\r' && *dptr != '\n') { 197 *dptr != '@' && *dptr != '\r' && *dptr != '\n') {
307 (*shift)++; 198 (*shift)++;
308 dptr++; 199 dptr++;
309 } 200 }
310 201
311 if (dptr <= limit && *dptr == '@') { 202 if (dptr < limit && *dptr == '@') {
312 dptr++; 203 dptr++;
313 (*shift)++; 204 (*shift)++;
314 } else { 205 } else {
@@ -319,74 +210,891 @@ static int skp_epaddr_len(const struct nf_conn *ct, const char *dptr,
319 return epaddr_len(ct, dptr, limit, shift); 210 return epaddr_len(ct, dptr, limit, shift);
320} 211}
321 212
322/* Returns 0 if not found, -1 error parsing. */ 213/* Parse a SIP request line of the form:
323int ct_sip_get_info(const struct nf_conn *ct, 214 *
324 const char *dptr, size_t dlen, 215 * Request-Line = Method SP Request-URI SP SIP-Version CRLF
325 unsigned int *matchoff, 216 *
326 unsigned int *matchlen, 217 * and return the offset and length of the address contained in the Request-URI.
327 enum sip_header_pos pos) 218 */
219int ct_sip_parse_request(const struct nf_conn *ct,
220 const char *dptr, unsigned int datalen,
221 unsigned int *matchoff, unsigned int *matchlen,
222 union nf_inet_addr *addr, __be16 *port)
328{ 223{
329 const struct sip_header_nfo *hnfo = &ct_sip_hdrs[pos]; 224 const char *start = dptr, *limit = dptr + datalen, *end;
330 const char *limit, *aux, *k = dptr; 225 unsigned int mlen;
226 unsigned int p;
331 int shift = 0; 227 int shift = 0;
332 228
333 limit = dptr + (dlen - hnfo->lnlen); 229 /* Skip method and following whitespace */
230 mlen = string_len(ct, dptr, limit, NULL);
231 if (!mlen)
232 return 0;
233 dptr += mlen;
234 if (++dptr >= limit)
235 return 0;
236
237 /* Find SIP URI */
238 limit -= strlen("sip:");
239 for (; dptr < limit; dptr++) {
240 if (*dptr == '\r' || *dptr == '\n')
241 return -1;
242 if (strnicmp(dptr, "sip:", strlen("sip:")) == 0)
243 break;
244 }
245 if (!skp_epaddr_len(ct, dptr, limit, &shift))
246 return 0;
247 dptr += shift;
248
249 if (!parse_addr(ct, dptr, &end, addr, limit))
250 return -1;
251 if (end < limit && *end == ':') {
252 end++;
253 p = simple_strtoul(end, (char **)&end, 10);
254 if (p < 1024 || p > 65535)
255 return -1;
256 *port = htons(p);
257 } else
258 *port = htons(SIP_PORT);
259
260 if (end == dptr)
261 return 0;
262 *matchoff = dptr - start;
263 *matchlen = end - dptr;
264 return 1;
265}
266EXPORT_SYMBOL_GPL(ct_sip_parse_request);
267
268/* SIP header parsing: SIP headers are located at the beginning of a line, but
269 * may span several lines, in which case the continuation lines begin with a
270 * whitespace character. RFC 2543 allows lines to be terminated with CR, LF or
271 * CRLF, RFC 3261 allows only CRLF, we support both.
272 *
273 * Headers are followed by (optionally) whitespace, a colon, again (optionally)
274 * whitespace and the values. Whitespace in this context means any amount of
275 * tabs, spaces and continuation lines, which are treated as a single whitespace
276 * character.
277 *
278 * Some headers may appear multiple times. A comma seperated list of values is
279 * equivalent to multiple headers.
280 */
281static const struct sip_header ct_sip_hdrs[] = {
282 [SIP_HDR_CSEQ] = SIP_HDR("CSeq", NULL, NULL, digits_len),
283 [SIP_HDR_FROM] = SIP_HDR("From", "f", "sip:", skp_epaddr_len),
284 [SIP_HDR_TO] = SIP_HDR("To", "t", "sip:", skp_epaddr_len),
285 [SIP_HDR_CONTACT] = SIP_HDR("Contact", "m", "sip:", skp_epaddr_len),
286 [SIP_HDR_VIA] = SIP_HDR("Via", "v", "UDP ", epaddr_len),
287 [SIP_HDR_EXPIRES] = SIP_HDR("Expires", NULL, NULL, digits_len),
288 [SIP_HDR_CONTENT_LENGTH] = SIP_HDR("Content-Length", "l", NULL, digits_len),
289};
290
291static const char *sip_follow_continuation(const char *dptr, const char *limit)
292{
293 /* Walk past newline */
294 if (++dptr >= limit)
295 return NULL;
296
297 /* Skip '\n' in CR LF */
298 if (*(dptr - 1) == '\r' && *dptr == '\n') {
299 if (++dptr >= limit)
300 return NULL;
301 }
302
303 /* Continuation line? */
304 if (*dptr != ' ' && *dptr != '\t')
305 return NULL;
306
307 /* skip leading whitespace */
308 for (; dptr < limit; dptr++) {
309 if (*dptr != ' ' && *dptr != '\t')
310 break;
311 }
312 return dptr;
313}
314
315static const char *sip_skip_whitespace(const char *dptr, const char *limit)
316{
317 for (; dptr < limit; dptr++) {
318 if (*dptr == ' ')
319 continue;
320 if (*dptr != '\r' && *dptr != '\n')
321 break;
322 dptr = sip_follow_continuation(dptr, limit);
323 if (dptr == NULL)
324 return NULL;
325 }
326 return dptr;
327}
328
329/* Search within a SIP header value, dealing with continuation lines */
330static const char *ct_sip_header_search(const char *dptr, const char *limit,
331 const char *needle, unsigned int len)
332{
333 for (limit -= len; dptr < limit; dptr++) {
334 if (*dptr == '\r' || *dptr == '\n') {
335 dptr = sip_follow_continuation(dptr, limit);
336 if (dptr == NULL)
337 break;
338 continue;
339 }
340
341 if (strnicmp(dptr, needle, len) == 0)
342 return dptr;
343 }
344 return NULL;
345}
346
347int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
348 unsigned int dataoff, unsigned int datalen,
349 enum sip_header_types type,
350 unsigned int *matchoff, unsigned int *matchlen)
351{
352 const struct sip_header *hdr = &ct_sip_hdrs[type];
353 const char *start = dptr, *limit = dptr + datalen;
354 int shift = 0;
334 355
335 while (dptr <= limit) { 356 for (dptr += dataoff; dptr < limit; dptr++) {
336 if ((strncmp(dptr, hnfo->lname, hnfo->lnlen) != 0) && 357 /* Find beginning of line */
337 (hnfo->sname == NULL || 358 if (*dptr != '\r' && *dptr != '\n')
338 strncmp(dptr, hnfo->sname, hnfo->snlen) != 0)) {
339 dptr++;
340 continue; 359 continue;
360 if (++dptr >= limit)
361 break;
362 if (*(dptr - 1) == '\r' && *dptr == '\n') {
363 if (++dptr >= limit)
364 break;
341 } 365 }
342 aux = ct_sip_search(hnfo->ln_str, dptr, hnfo->ln_strlen, 366
343 ct_sip_lnlen(dptr, limit), 367 /* Skip continuation lines */
344 hnfo->case_sensitive); 368 if (*dptr == ' ' || *dptr == '\t')
345 if (!aux) { 369 continue;
346 pr_debug("'%s' not found in '%s'.\n", hnfo->ln_str, 370
347 hnfo->lname); 371 /* Find header. Compact headers must be followed by a
348 return -1; 372 * non-alphabetic character to avoid mismatches. */
373 if (limit - dptr >= hdr->len &&
374 strnicmp(dptr, hdr->name, hdr->len) == 0)
375 dptr += hdr->len;
376 else if (hdr->cname && limit - dptr >= hdr->clen + 1 &&
377 strnicmp(dptr, hdr->cname, hdr->clen) == 0 &&
378 !isalpha(*(dptr + hdr->clen + 1)))
379 dptr += hdr->clen;
380 else
381 continue;
382
383 /* Find and skip colon */
384 dptr = sip_skip_whitespace(dptr, limit);
385 if (dptr == NULL)
386 break;
387 if (*dptr != ':' || ++dptr >= limit)
388 break;
389
390 /* Skip whitespace after colon */
391 dptr = sip_skip_whitespace(dptr, limit);
392 if (dptr == NULL)
393 break;
394
395 *matchoff = dptr - start;
396 if (hdr->search) {
397 dptr = ct_sip_header_search(dptr, limit, hdr->search,
398 hdr->slen);
399 if (!dptr)
400 return -1;
401 dptr += hdr->slen;
349 } 402 }
350 aux += hnfo->ln_strlen;
351 403
352 *matchlen = hnfo->match_len(ct, aux, limit, &shift); 404 *matchlen = hdr->match_len(ct, dptr, limit, &shift);
353 if (!*matchlen) 405 if (!*matchlen)
354 return -1; 406 return -1;
407 *matchoff = dptr - start + shift;
408 return 1;
409 }
410 return 0;
411}
412EXPORT_SYMBOL_GPL(ct_sip_get_header);
413
414/* Get next header field in a list of comma seperated values */
415static int ct_sip_next_header(const struct nf_conn *ct, const char *dptr,
416 unsigned int dataoff, unsigned int datalen,
417 enum sip_header_types type,
418 unsigned int *matchoff, unsigned int *matchlen)
419{
420 const struct sip_header *hdr = &ct_sip_hdrs[type];
421 const char *start = dptr, *limit = dptr + datalen;
422 int shift = 0;
423
424 dptr += dataoff;
425
426 dptr = ct_sip_header_search(dptr, limit, ",", strlen(","));
427 if (!dptr)
428 return 0;
429
430 dptr = ct_sip_header_search(dptr, limit, hdr->search, hdr->slen);
431 if (!dptr)
432 return 0;
433 dptr += hdr->slen;
434
435 *matchoff = dptr - start;
436 *matchlen = hdr->match_len(ct, dptr, limit, &shift);
437 if (!*matchlen)
438 return -1;
439 *matchoff += shift;
440 return 1;
441}
442
443/* Walk through headers until a parsable one is found or no header of the
444 * given type is left. */
445static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
446 unsigned int dataoff, unsigned int datalen,
447 enum sip_header_types type, int *in_header,
448 unsigned int *matchoff, unsigned int *matchlen)
449{
450 int ret;
451
452 if (in_header && *in_header) {
453 while (1) {
454 ret = ct_sip_next_header(ct, dptr, dataoff, datalen,
455 type, matchoff, matchlen);
456 if (ret > 0)
457 return ret;
458 if (ret == 0)
459 break;
460 dataoff += *matchoff;
461 }
462 *in_header = 0;
463 }
464
465 while (1) {
466 ret = ct_sip_get_header(ct, dptr, dataoff, datalen,
467 type, matchoff, matchlen);
468 if (ret > 0)
469 break;
470 if (ret == 0)
471 return ret;
472 dataoff += *matchoff;
473 }
474
475 if (in_header)
476 *in_header = 1;
477 return 1;
478}
479
480/* Locate a SIP header, parse the URI and return the offset and length of
481 * the address as well as the address and port themselves. A stream of
482 * headers can be parsed by handing in a non-NULL datalen and in_header
483 * pointer.
484 */
485int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
486 unsigned int *dataoff, unsigned int datalen,
487 enum sip_header_types type, int *in_header,
488 unsigned int *matchoff, unsigned int *matchlen,
489 union nf_inet_addr *addr, __be16 *port)
490{
491 const char *c, *limit = dptr + datalen;
492 unsigned int p;
493 int ret;
494
495 ret = ct_sip_walk_headers(ct, dptr, dataoff ? *dataoff : 0, datalen,
496 type, in_header, matchoff, matchlen);
497 WARN_ON(ret < 0);
498 if (ret == 0)
499 return ret;
500
501 if (!parse_addr(ct, dptr + *matchoff, &c, addr, limit))
502 return -1;
503 if (*c == ':') {
504 c++;
505 p = simple_strtoul(c, (char **)&c, 10);
506 if (p < 1024 || p > 65535)
507 return -1;
508 *port = htons(p);
509 } else
510 *port = htons(SIP_PORT);
355 511
356 *matchoff = (aux - k) + shift; 512 if (dataoff)
513 *dataoff = c - dptr;
514 return 1;
515}
516EXPORT_SYMBOL_GPL(ct_sip_parse_header_uri);
517
518/* Parse address from header parameter and return address, offset and length */
519int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
520 unsigned int dataoff, unsigned int datalen,
521 const char *name,
522 unsigned int *matchoff, unsigned int *matchlen,
523 union nf_inet_addr *addr)
524{
525 const char *limit = dptr + datalen;
526 const char *start, *end;
527
528 limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(","));
529 if (!limit)
530 limit = dptr + datalen;
531
532 start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name));
533 if (!start)
534 return 0;
535
536 start += strlen(name);
537 if (!parse_addr(ct, start, &end, addr, limit))
538 return 0;
539 *matchoff = start - dptr;
540 *matchlen = end - start;
541 return 1;
542}
543EXPORT_SYMBOL_GPL(ct_sip_parse_address_param);
357 544
358 pr_debug("%s match succeeded! - len: %u\n", hnfo->lname, 545/* Parse numerical header parameter and return value, offset and length */
359 *matchlen); 546int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
547 unsigned int dataoff, unsigned int datalen,
548 const char *name,
549 unsigned int *matchoff, unsigned int *matchlen,
550 unsigned int *val)
551{
552 const char *limit = dptr + datalen;
553 const char *start;
554 char *end;
555
556 limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(","));
557 if (!limit)
558 limit = dptr + datalen;
559
560 start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name));
561 if (!start)
562 return 0;
563
564 start += strlen(name);
565 *val = simple_strtoul(start, &end, 0);
566 if (start == end)
567 return 0;
568 if (matchoff && matchlen) {
569 *matchoff = start - dptr;
570 *matchlen = end - start;
571 }
572 return 1;
573}
574EXPORT_SYMBOL_GPL(ct_sip_parse_numerical_param);
575
576/* SDP header parsing: a SDP session description contains an ordered set of
577 * headers, starting with a section containing general session parameters,
578 * optionally followed by multiple media descriptions.
579 *
580 * SDP headers always start at the beginning of a line. According to RFC 2327:
581 * "The sequence CRLF (0x0d0a) is used to end a record, although parsers should
582 * be tolerant and also accept records terminated with a single newline
583 * character". We handle both cases.
584 */
585static const struct sip_header ct_sdp_hdrs[] = {
586 [SDP_HDR_VERSION] = SDP_HDR("v=", NULL, digits_len),
587 [SDP_HDR_OWNER_IP4] = SDP_HDR("o=", "IN IP4 ", epaddr_len),
588 [SDP_HDR_CONNECTION_IP4] = SDP_HDR("c=", "IN IP4 ", epaddr_len),
589 [SDP_HDR_OWNER_IP6] = SDP_HDR("o=", "IN IP6 ", epaddr_len),
590 [SDP_HDR_CONNECTION_IP6] = SDP_HDR("c=", "IN IP6 ", epaddr_len),
591 [SDP_HDR_MEDIA] = SDP_HDR("m=", NULL, media_len),
592};
593
594/* Linear string search within SDP header values */
595static const char *ct_sdp_header_search(const char *dptr, const char *limit,
596 const char *needle, unsigned int len)
597{
598 for (limit -= len; dptr < limit; dptr++) {
599 if (*dptr == '\r' || *dptr == '\n')
600 break;
601 if (strncmp(dptr, needle, len) == 0)
602 return dptr;
603 }
604 return NULL;
605}
606
607/* Locate a SDP header (optionally a substring within the header value),
608 * optionally stopping at the first occurence of the term header, parse
609 * it and return the offset and length of the data we're interested in.
610 */
611int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
612 unsigned int dataoff, unsigned int datalen,
613 enum sdp_header_types type,
614 enum sdp_header_types term,
615 unsigned int *matchoff, unsigned int *matchlen)
616{
617 const struct sip_header *hdr = &ct_sdp_hdrs[type];
618 const struct sip_header *thdr = &ct_sdp_hdrs[term];
619 const char *start = dptr, *limit = dptr + datalen;
620 int shift = 0;
621
622 for (dptr += dataoff; dptr < limit; dptr++) {
623 /* Find beginning of line */
624 if (*dptr != '\r' && *dptr != '\n')
625 continue;
626 if (++dptr >= limit)
627 break;
628 if (*(dptr - 1) == '\r' && *dptr == '\n') {
629 if (++dptr >= limit)
630 break;
631 }
632
633 if (term != SDP_HDR_UNSPEC &&
634 limit - dptr >= thdr->len &&
635 strnicmp(dptr, thdr->name, thdr->len) == 0)
636 break;
637 else if (limit - dptr >= hdr->len &&
638 strnicmp(dptr, hdr->name, hdr->len) == 0)
639 dptr += hdr->len;
640 else
641 continue;
642
643 *matchoff = dptr - start;
644 if (hdr->search) {
645 dptr = ct_sdp_header_search(dptr, limit, hdr->search,
646 hdr->slen);
647 if (!dptr)
648 return -1;
649 dptr += hdr->slen;
650 }
651
652 *matchlen = hdr->match_len(ct, dptr, limit, &shift);
653 if (!*matchlen)
654 return -1;
655 *matchoff = dptr - start + shift;
360 return 1; 656 return 1;
361 } 657 }
362 pr_debug("%s header not found.\n", hnfo->lname);
363 return 0; 658 return 0;
364} 659}
365EXPORT_SYMBOL_GPL(ct_sip_get_info); 660EXPORT_SYMBOL_GPL(ct_sip_get_sdp_header);
366 661
367static int set_expected_rtp(struct sk_buff *skb, 662static int ct_sip_parse_sdp_addr(const struct nf_conn *ct, const char *dptr,
368 struct nf_conn *ct, 663 unsigned int dataoff, unsigned int datalen,
369 enum ip_conntrack_info ctinfo, 664 enum sdp_header_types type,
370 union nf_inet_addr *addr, 665 enum sdp_header_types term,
371 __be16 port, 666 unsigned int *matchoff, unsigned int *matchlen,
372 const char *dptr) 667 union nf_inet_addr *addr)
373{ 668{
669 int ret;
670
671 ret = ct_sip_get_sdp_header(ct, dptr, dataoff, datalen, type, term,
672 matchoff, matchlen);
673 if (ret <= 0)
674 return ret;
675
676 if (!parse_addr(ct, dptr + *matchoff, NULL, addr,
677 dptr + *matchoff + *matchlen))
678 return -1;
679 return 1;
680}
681
682static int refresh_signalling_expectation(struct nf_conn *ct,
683 union nf_inet_addr *addr,
684 __be16 port,
685 unsigned int expires)
686{
687 struct nf_conn_help *help = nfct_help(ct);
374 struct nf_conntrack_expect *exp; 688 struct nf_conntrack_expect *exp;
689 struct hlist_node *n, *next;
690 int found = 0;
691
692 spin_lock_bh(&nf_conntrack_lock);
693 hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
694 if (exp->class != SIP_EXPECT_SIGNALLING ||
695 !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) ||
696 exp->tuple.dst.u.udp.port != port)
697 continue;
698 if (!del_timer(&exp->timeout))
699 continue;
700 exp->flags &= ~NF_CT_EXPECT_INACTIVE;
701 exp->timeout.expires = jiffies + expires * HZ;
702 add_timer(&exp->timeout);
703 found = 1;
704 break;
705 }
706 spin_unlock_bh(&nf_conntrack_lock);
707 return found;
708}
709
710static void flush_expectations(struct nf_conn *ct, bool media)
711{
712 struct nf_conn_help *help = nfct_help(ct);
713 struct nf_conntrack_expect *exp;
714 struct hlist_node *n, *next;
715
716 spin_lock_bh(&nf_conntrack_lock);
717 hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
718 if ((exp->class != SIP_EXPECT_SIGNALLING) ^ media)
719 continue;
720 if (!del_timer(&exp->timeout))
721 continue;
722 nf_ct_unlink_expect(exp);
723 nf_ct_expect_put(exp);
724 if (!media)
725 break;
726 }
727 spin_unlock_bh(&nf_conntrack_lock);
728}
729
730static int set_expected_rtp_rtcp(struct sk_buff *skb,
731 const char **dptr, unsigned int *datalen,
732 union nf_inet_addr *daddr, __be16 port,
733 enum sip_expectation_classes class,
734 unsigned int mediaoff, unsigned int medialen)
735{
736 struct nf_conntrack_expect *exp, *rtp_exp, *rtcp_exp;
737 enum ip_conntrack_info ctinfo;
738 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
739 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
740 union nf_inet_addr *saddr;
741 struct nf_conntrack_tuple tuple;
742 int direct_rtp = 0, skip_expect = 0, ret = NF_DROP;
743 u_int16_t base_port;
744 __be16 rtp_port, rtcp_port;
745 typeof(nf_nat_sdp_port_hook) nf_nat_sdp_port;
746 typeof(nf_nat_sdp_media_hook) nf_nat_sdp_media;
747
748 saddr = NULL;
749 if (sip_direct_media) {
750 if (!nf_inet_addr_cmp(daddr, &ct->tuplehash[dir].tuple.src.u3))
751 return NF_ACCEPT;
752 saddr = &ct->tuplehash[!dir].tuple.src.u3;
753 }
754
755 /* We need to check whether the registration exists before attempting
756 * to register it since we can see the same media description multiple
757 * times on different connections in case multiple endpoints receive
758 * the same call.
759 *
760 * RTP optimization: if we find a matching media channel expectation
761 * and both the expectation and this connection are SNATed, we assume
762 * both sides can reach each other directly and use the final
763 * destination address from the expectation. We still need to keep
764 * the NATed expectations for media that might arrive from the
765 * outside, and additionally need to expect the direct RTP stream
766 * in case it passes through us even without NAT.
767 */
768 memset(&tuple, 0, sizeof(tuple));
769 if (saddr)
770 tuple.src.u3 = *saddr;
771 tuple.src.l3num = nf_ct_l3num(ct);
772 tuple.dst.protonum = IPPROTO_UDP;
773 tuple.dst.u3 = *daddr;
774 tuple.dst.u.udp.port = port;
775
776 rcu_read_lock();
777 do {
778 exp = __nf_ct_expect_find(&tuple);
779
780 if (!exp || exp->master == ct ||
781 nfct_help(exp->master)->helper != nfct_help(ct)->helper ||
782 exp->class != class)
783 break;
784
785 if (exp->tuple.src.l3num == AF_INET && !direct_rtp &&
786 (exp->saved_ip != exp->tuple.dst.u3.ip ||
787 exp->saved_proto.udp.port != exp->tuple.dst.u.udp.port) &&
788 ct->status & IPS_NAT_MASK) {
789 daddr->ip = exp->saved_ip;
790 tuple.dst.u3.ip = exp->saved_ip;
791 tuple.dst.u.udp.port = exp->saved_proto.udp.port;
792 direct_rtp = 1;
793 } else
794 skip_expect = 1;
795 } while (!skip_expect);
796 rcu_read_unlock();
797
798 base_port = ntohs(tuple.dst.u.udp.port) & ~1;
799 rtp_port = htons(base_port);
800 rtcp_port = htons(base_port + 1);
801
802 if (direct_rtp) {
803 nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook);
804 if (nf_nat_sdp_port &&
805 !nf_nat_sdp_port(skb, dptr, datalen,
806 mediaoff, medialen, ntohs(rtp_port)))
807 goto err1;
808 }
809
810 if (skip_expect)
811 return NF_ACCEPT;
812
813 rtp_exp = nf_ct_expect_alloc(ct);
814 if (rtp_exp == NULL)
815 goto err1;
816 nf_ct_expect_init(rtp_exp, class, nf_ct_l3num(ct), saddr, daddr,
817 IPPROTO_UDP, NULL, &rtp_port);
818
819 rtcp_exp = nf_ct_expect_alloc(ct);
820 if (rtcp_exp == NULL)
821 goto err2;
822 nf_ct_expect_init(rtcp_exp, class, nf_ct_l3num(ct), saddr, daddr,
823 IPPROTO_UDP, NULL, &rtcp_port);
824
825 nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook);
826 if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp)
827 ret = nf_nat_sdp_media(skb, dptr, datalen, rtp_exp, rtcp_exp,
828 mediaoff, medialen, daddr);
829 else {
830 if (nf_ct_expect_related(rtp_exp) == 0) {
831 if (nf_ct_expect_related(rtcp_exp) != 0)
832 nf_ct_unexpect_related(rtp_exp);
833 else
834 ret = NF_ACCEPT;
835 }
836 }
837 nf_ct_expect_put(rtcp_exp);
838err2:
839 nf_ct_expect_put(rtp_exp);
840err1:
841 return ret;
842}
843
844static const struct sdp_media_type sdp_media_types[] = {
845 SDP_MEDIA_TYPE("audio ", SIP_EXPECT_AUDIO),
846 SDP_MEDIA_TYPE("video ", SIP_EXPECT_VIDEO),
847};
848
849static const struct sdp_media_type *sdp_media_type(const char *dptr,
850 unsigned int matchoff,
851 unsigned int matchlen)
852{
853 const struct sdp_media_type *t;
854 unsigned int i;
855
856 for (i = 0; i < ARRAY_SIZE(sdp_media_types); i++) {
857 t = &sdp_media_types[i];
858 if (matchlen < t->len ||
859 strncmp(dptr + matchoff, t->name, t->len))
860 continue;
861 return t;
862 }
863 return NULL;
864}
865
/* Process the SDP payload of a SIP message: locate the session description,
 * walk the media descriptions, and set up RTP/RTCP expectations for each
 * active stream. When NAT is in effect, also invokes the NAT hooks to
 * rewrite media and session addresses. Returns an NF_* verdict. */
static int process_sdp(struct sk_buff *skb,
		       const char **dptr, unsigned int *datalen,
		       unsigned int cseq)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
	unsigned int matchoff, matchlen;
	unsigned int mediaoff, medialen;
	unsigned int sdpoff;
	unsigned int caddr_len, maddr_len;
	unsigned int i;
	union nf_inet_addr caddr, maddr, rtp_addr;
	unsigned int port;
	enum sdp_header_types c_hdr;
	const struct sdp_media_type *t;
	int ret = NF_ACCEPT;
	typeof(nf_nat_sdp_addr_hook) nf_nat_sdp_addr;
	typeof(nf_nat_sdp_session_hook) nf_nat_sdp_session;

	nf_nat_sdp_addr = rcu_dereference(nf_nat_sdp_addr_hook);
	/* Connection ("c=") header variant depends on the L3 family. */
	c_hdr = nf_ct_l3num(ct) == AF_INET ? SDP_HDR_CONNECTION_IP4 :
					     SDP_HDR_CONNECTION_IP6;

	/* Find beginning of session description */
	if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen,
				  SDP_HDR_VERSION, SDP_HDR_UNSPEC,
				  &matchoff, &matchlen) <= 0)
		return NF_ACCEPT;
	sdpoff = matchoff;

	/* The connection information is contained in the session description
	 * and/or once per media description. The first media description marks
	 * the end of the session description. */
	caddr_len = 0;
	if (ct_sip_parse_sdp_addr(ct, *dptr, sdpoff, *datalen,
				  c_hdr, SDP_HDR_MEDIA,
				  &matchoff, &matchlen, &caddr) > 0)
		caddr_len = matchlen;

	/* Walk the "m=" media descriptions; i counts only recognized media
	 * types, bounding the loop at ARRAY_SIZE(sdp_media_types) streams. */
	mediaoff = sdpoff;
	for (i = 0; i < ARRAY_SIZE(sdp_media_types); ) {
		if (ct_sip_get_sdp_header(ct, *dptr, mediaoff, *datalen,
					  SDP_HDR_MEDIA, SDP_HDR_UNSPEC,
					  &mediaoff, &medialen) <= 0)
			break;

		/* Get media type and port number. A media port value of zero
		 * indicates an inactive stream. */
		t = sdp_media_type(*dptr, mediaoff, medialen);
		if (!t) {
			/* Unknown media type: skip past this description. */
			mediaoff += medialen;
			continue;
		}
		mediaoff += t->len;
		medialen -= t->len;

		port = simple_strtoul(*dptr + mediaoff, NULL, 10);
		if (port == 0)
			continue;
		/* Reject privileged or out-of-range media ports. */
		if (port < 1024 || port > 65535)
			return NF_DROP;

		/* The media description overrides the session description. */
		maddr_len = 0;
		if (ct_sip_parse_sdp_addr(ct, *dptr, mediaoff, *datalen,
					  c_hdr, SDP_HDR_MEDIA,
					  &matchoff, &matchlen, &maddr) > 0) {
			maddr_len = matchlen;
			memcpy(&rtp_addr, &maddr, sizeof(rtp_addr));
		} else if (caddr_len)
			memcpy(&rtp_addr, &caddr, sizeof(rtp_addr));
		else
			/* No connection address at all: malformed SDP. */
			return NF_DROP;

		ret = set_expected_rtp_rtcp(skb, dptr, datalen,
					    &rtp_addr, htons(port), t->class,
					    mediaoff, medialen);
		if (ret != NF_ACCEPT)
			return ret;

		/* Update media connection address if present */
		if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) {
			ret = nf_nat_sdp_addr(skb, dptr, mediaoff, datalen,
					      c_hdr, SDP_HDR_MEDIA, &rtp_addr);
			if (ret != NF_ACCEPT)
				return ret;
		}
		i++;
	}

	/* Update session connection and owner addresses */
	/* NOTE(review): if no media description was processed, rtp_addr is
	 * still uninitialized here — presumably the NAT session hook copes
	 * or this path can't be reached with SDP but no media; confirm. */
	nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook);
	if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK)
		ret = nf_nat_sdp_session(skb, dptr, sdpoff, datalen, &rtp_addr);

	return ret;
}
963static int process_invite_response(struct sk_buff *skb,
964 const char **dptr, unsigned int *datalen,
965 unsigned int cseq, unsigned int code)
966{
967 enum ip_conntrack_info ctinfo;
968 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
969
970 if ((code >= 100 && code <= 199) ||
971 (code >= 200 && code <= 299))
972 return process_sdp(skb, dptr, datalen, cseq);
973 else {
974 flush_expectations(ct, true);
975 return NF_ACCEPT;
976 }
977}
978
979static int process_update_response(struct sk_buff *skb,
980 const char **dptr, unsigned int *datalen,
981 unsigned int cseq, unsigned int code)
982{
983 enum ip_conntrack_info ctinfo;
984 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
985
986 if ((code >= 100 && code <= 199) ||
987 (code >= 200 && code <= 299))
988 return process_sdp(skb, dptr, datalen, cseq);
989 else {
990 flush_expectations(ct, true);
991 return NF_ACCEPT;
992 }
993}
994
995static int process_prack_response(struct sk_buff *skb,
996 const char **dptr, unsigned int *datalen,
997 unsigned int cseq, unsigned int code)
998{
999 enum ip_conntrack_info ctinfo;
1000 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1001
1002 if ((code >= 100 && code <= 199) ||
1003 (code >= 200 && code <= 299))
1004 return process_sdp(skb, dptr, datalen, cseq);
1005 else {
1006 flush_expectations(ct, true);
1007 return NF_ACCEPT;
1008 }
1009}
1010
/* A BYE tears the call down: remove all media expectations for this
 * conntrack entry. The request itself is always accepted. */
static int process_bye_request(struct sk_buff *skb,
			       const char **dptr, unsigned int *datalen,
			       unsigned int cseq)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

	flush_expectations(ct, true);
	return NF_ACCEPT;
}
1021
1022/* Parse a REGISTER request and create a permanent expectation for incoming
1023 * signalling connections. The expectation is marked inactive and is activated
1024 * when receiving a response indicating success from the registrar.
1025 */
1026static int process_register_request(struct sk_buff *skb,
1027 const char **dptr, unsigned int *datalen,
1028 unsigned int cseq)
1029{
1030 enum ip_conntrack_info ctinfo;
1031 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1032 struct nf_conn_help *help = nfct_help(ct);
375 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 1033 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
376 int family = ct->tuplehash[!dir].tuple.src.l3num; 1034 unsigned int matchoff, matchlen;
1035 struct nf_conntrack_expect *exp;
1036 union nf_inet_addr *saddr, daddr;
1037 __be16 port;
1038 unsigned int expires = 0;
377 int ret; 1039 int ret;
378 typeof(nf_nat_sdp_hook) nf_nat_sdp; 1040 typeof(nf_nat_sip_expect_hook) nf_nat_sip_expect;
1041
1042 /* Expected connections can not register again. */
1043 if (ct->status & IPS_EXPECTED)
1044 return NF_ACCEPT;
1045
1046 /* We must check the expiration time: a value of zero signals the
1047 * registrar to release the binding. We'll remove our expectation
1048 * when receiving the new bindings in the response, but we don't
1049 * want to create new ones.
1050 *
1051 * The expiration time may be contained in Expires: header, the
1052 * Contact: header parameters or the URI parameters.
1053 */
1054 if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_EXPIRES,
1055 &matchoff, &matchlen) > 0)
1056 expires = simple_strtoul(*dptr + matchoff, NULL, 10);
1057
1058 ret = ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
1059 SIP_HDR_CONTACT, NULL,
1060 &matchoff, &matchlen, &daddr, &port);
1061 if (ret < 0)
1062 return NF_DROP;
1063 else if (ret == 0)
1064 return NF_ACCEPT;
1065
1066 /* We don't support third-party registrations */
1067 if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, &daddr))
1068 return NF_ACCEPT;
1069
1070 if (ct_sip_parse_numerical_param(ct, *dptr,
1071 matchoff + matchlen, *datalen,
1072 "expires=", NULL, NULL, &expires) < 0)
1073 return NF_DROP;
1074
1075 if (expires == 0) {
1076 ret = NF_ACCEPT;
1077 goto store_cseq;
1078 }
379 1079
380 exp = nf_ct_expect_alloc(ct); 1080 exp = nf_ct_expect_alloc(ct);
381 if (exp == NULL) 1081 if (!exp)
382 return NF_DROP; 1082 return NF_DROP;
383 nf_ct_expect_init(exp, family,
384 &ct->tuplehash[!dir].tuple.src.u3, addr,
385 IPPROTO_UDP, NULL, &port);
386 1083
387 nf_nat_sdp = rcu_dereference(nf_nat_sdp_hook); 1084 saddr = NULL;
388 if (nf_nat_sdp && ct->status & IPS_NAT_MASK) 1085 if (sip_direct_signalling)
389 ret = nf_nat_sdp(skb, ctinfo, exp, dptr); 1086 saddr = &ct->tuplehash[!dir].tuple.src.u3;
1087
1088 nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct),
1089 saddr, &daddr, IPPROTO_UDP, NULL, &port);
1090 exp->timeout.expires = sip_timeout * HZ;
1091 exp->helper = nfct_help(ct)->helper;
1092 exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE;
1093
1094 nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook);
1095 if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK)
1096 ret = nf_nat_sip_expect(skb, dptr, datalen, exp,
1097 matchoff, matchlen);
390 else { 1098 else {
391 if (nf_ct_expect_related(exp) != 0) 1099 if (nf_ct_expect_related(exp) != 0)
392 ret = NF_DROP; 1100 ret = NF_DROP;
@@ -395,22 +1103,160 @@ static int set_expected_rtp(struct sk_buff *skb,
395 } 1103 }
396 nf_ct_expect_put(exp); 1104 nf_ct_expect_put(exp);
397 1105
1106store_cseq:
1107 if (ret == NF_ACCEPT)
1108 help->help.ct_sip_info.register_cseq = cseq;
398 return ret; 1109 return ret;
399} 1110}
400 1111
/* Handle a response to a REGISTER request: on success (2xx), walk the
 * Contact headers, refresh the matching signalling expectation with the
 * binding's expiration time, and otherwise flush the (inactive)
 * signalling expectations created by the request. */
static int process_register_response(struct sk_buff *skb,
				     const char **dptr, unsigned int *datalen,
				     unsigned int cseq, unsigned int code)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
	struct nf_conn_help *help = nfct_help(ct);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	union nf_inet_addr addr;
	__be16 port;
	unsigned int matchoff, matchlen, dataoff = 0;
	unsigned int expires = 0;
	int in_contact = 0, ret;

	/* According to RFC 3261, "UAs MUST NOT send a new registration until
	 * they have received a final response from the registrar for the
	 * previous one or the previous REGISTER request has timed out".
	 *
	 * However, some servers fail to detect retransmissions and send late
	 * responses, so we store the sequence number of the last valid
	 * request and compare it here.
	 */
	if (help->help.ct_sip_info.register_cseq != cseq)
		return NF_ACCEPT;

	/* Provisional responses carry no bindings; keep waiting. */
	if (code >= 100 && code <= 199)
		return NF_ACCEPT;
	/* Any non-2xx final response: registration failed, drop the
	 * expectations created by the request. */
	if (code < 200 || code > 299)
		goto flush;

	/* Default expiration from the Expires: header, may be overridden
	 * per Contact by an "expires=" parameter below. */
	if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_EXPIRES,
			      &matchoff, &matchlen) > 0)
		expires = simple_strtoul(*dptr + matchoff, NULL, 10);

	/* Iterate over all Contact header URIs in the response. */
	while (1) {
		unsigned int c_expires = expires;

		ret = ct_sip_parse_header_uri(ct, *dptr, &dataoff, *datalen,
					      SIP_HDR_CONTACT, &in_contact,
					      &matchoff, &matchlen,
					      &addr, &port);
		if (ret < 0)
			return NF_DROP;
		else if (ret == 0)
			break;

		/* We don't support third-party registrations */
		if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, &addr))
			continue;

		ret = ct_sip_parse_numerical_param(ct, *dptr,
						   matchoff + matchlen,
						   *datalen, "expires=",
						   NULL, NULL, &c_expires);
		if (ret < 0)
			return NF_DROP;
		/* expires=0 releases the binding: fall through to flush. */
		if (c_expires == 0)
			break;
		if (refresh_signalling_expectation(ct, &addr, port, c_expires))
			return NF_ACCEPT;
	}

flush:
	flush_expectations(ct, false);
	return NF_ACCEPT;
}
1178
/* Dispatch table mapping SIP methods to their request/response handlers.
 * Methods whose messages may carry SDP share process_sdp; a NULL entry
 * means no processing is needed for that message direction. */
static const struct sip_handler sip_handlers[] = {
	SIP_HANDLER("INVITE", process_sdp, process_invite_response),
	SIP_HANDLER("UPDATE", process_sdp, process_update_response),
	SIP_HANDLER("ACK", process_sdp, NULL),
	SIP_HANDLER("PRACK", process_sdp, process_prack_response),
	SIP_HANDLER("BYE", process_bye_request, NULL),
	SIP_HANDLER("REGISTER", process_register_request, process_register_response),
};
1187
1188static int process_sip_response(struct sk_buff *skb,
1189 const char **dptr, unsigned int *datalen)
1190{
1191 static const struct sip_handler *handler;
1192 enum ip_conntrack_info ctinfo;
1193 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1194 unsigned int matchoff, matchlen;
1195 unsigned int code, cseq, dataoff, i;
1196
1197 if (*datalen < strlen("SIP/2.0 200"))
1198 return NF_ACCEPT;
1199 code = simple_strtoul(*dptr + strlen("SIP/2.0 "), NULL, 10);
1200 if (!code)
1201 return NF_DROP;
1202
1203 if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
1204 &matchoff, &matchlen) <= 0)
1205 return NF_DROP;
1206 cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
1207 if (!cseq)
1208 return NF_DROP;
1209 dataoff = matchoff + matchlen + 1;
1210
1211 for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
1212 handler = &sip_handlers[i];
1213 if (handler->response == NULL)
1214 continue;
1215 if (*datalen < dataoff + handler->len ||
1216 strnicmp(*dptr + dataoff, handler->method, handler->len))
1217 continue;
1218 return handler->response(skb, dptr, datalen, cseq, code);
1219 }
1220 return NF_ACCEPT;
1221}
1222
1223static int process_sip_request(struct sk_buff *skb,
1224 const char **dptr, unsigned int *datalen)
1225{
1226 static const struct sip_handler *handler;
1227 enum ip_conntrack_info ctinfo;
1228 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1229 unsigned int matchoff, matchlen;
1230 unsigned int cseq, i;
1231
1232 for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
1233 handler = &sip_handlers[i];
1234 if (handler->request == NULL)
1235 continue;
1236 if (*datalen < handler->len ||
1237 strnicmp(*dptr, handler->method, handler->len))
1238 continue;
1239
1240 if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
1241 &matchoff, &matchlen) <= 0)
1242 return NF_DROP;
1243 cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
1244 if (!cseq)
1245 return NF_DROP;
1246
1247 return handler->request(skb, dptr, datalen, cseq);
1248 }
1249 return NF_ACCEPT;
1250}
1251
401static int sip_help(struct sk_buff *skb, 1252static int sip_help(struct sk_buff *skb,
402 unsigned int protoff, 1253 unsigned int protoff,
403 struct nf_conn *ct, 1254 struct nf_conn *ct,
404 enum ip_conntrack_info ctinfo) 1255 enum ip_conntrack_info ctinfo)
405{ 1256{
406 int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
407 union nf_inet_addr addr;
408 unsigned int dataoff, datalen; 1257 unsigned int dataoff, datalen;
409 const char *dptr; 1258 const char *dptr;
410 int ret = NF_ACCEPT; 1259 int ret;
411 unsigned int matchoff, matchlen;
412 u_int16_t port;
413 enum sip_header_pos pos;
414 typeof(nf_nat_sip_hook) nf_nat_sip; 1260 typeof(nf_nat_sip_hook) nf_nat_sip;
415 1261
416 /* No Data ? */ 1262 /* No Data ? */
@@ -424,58 +1270,45 @@ static int sip_help(struct sk_buff *skb,
424 dptr = skb->data + dataoff; 1270 dptr = skb->data + dataoff;
425 else { 1271 else {
426 pr_debug("Copy of skbuff not supported yet.\n"); 1272 pr_debug("Copy of skbuff not supported yet.\n");
427 goto out; 1273 return NF_ACCEPT;
428 }
429
430 nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
431 if (nf_nat_sip && ct->status & IPS_NAT_MASK) {
432 if (!nf_nat_sip(skb, ctinfo, ct, &dptr)) {
433 ret = NF_DROP;
434 goto out;
435 }
436 } 1274 }
437 1275
438 datalen = skb->len - dataoff; 1276 datalen = skb->len - dataoff;
439 if (datalen < sizeof("SIP/2.0 200") - 1) 1277 if (datalen < strlen("SIP/2.0 200"))
440 goto out; 1278 return NF_ACCEPT;
441
442 /* RTP info only in some SDP pkts */
443 if (memcmp(dptr, "INVITE", sizeof("INVITE") - 1) != 0 &&
444 memcmp(dptr, "UPDATE", sizeof("UPDATE") - 1) != 0 &&
445 memcmp(dptr, "SIP/2.0 180", sizeof("SIP/2.0 180") - 1) != 0 &&
446 memcmp(dptr, "SIP/2.0 183", sizeof("SIP/2.0 183") - 1) != 0 &&
447 memcmp(dptr, "SIP/2.0 200", sizeof("SIP/2.0 200") - 1) != 0) {
448 goto out;
449 }
450 /* Get address and port from SDP packet. */
451 pos = family == AF_INET ? POS_CONNECTION_IP4 : POS_CONNECTION_IP6;
452 if (ct_sip_get_info(ct, dptr, datalen, &matchoff, &matchlen, pos) > 0) {
453
454 /* We'll drop only if there are parse problems. */
455 if (!parse_addr(ct, dptr + matchoff, NULL, &addr,
456 dptr + datalen)) {
457 ret = NF_DROP;
458 goto out;
459 }
460 if (ct_sip_get_info(ct, dptr, datalen, &matchoff, &matchlen,
461 POS_MEDIA) > 0) {
462 1279
463 port = simple_strtoul(dptr + matchoff, NULL, 10); 1280 if (strnicmp(dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
464 if (port < 1024) { 1281 ret = process_sip_request(skb, &dptr, &datalen);
465 ret = NF_DROP; 1282 else
466 goto out; 1283 ret = process_sip_response(skb, &dptr, &datalen);
467 } 1284
468 ret = set_expected_rtp(skb, ct, ctinfo, &addr, 1285 if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
469 htons(port), dptr); 1286 nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
470 } 1287 if (nf_nat_sip && !nf_nat_sip(skb, &dptr, &datalen))
1288 ret = NF_DROP;
471 } 1289 }
472out: 1290
473 return ret; 1291 return ret;
474} 1292}
475 1293
476static struct nf_conntrack_helper sip[MAX_PORTS][2] __read_mostly; 1294static struct nf_conntrack_helper sip[MAX_PORTS][2] __read_mostly;
477static char sip_names[MAX_PORTS][2][sizeof("sip-65535")] __read_mostly; 1295static char sip_names[MAX_PORTS][2][sizeof("sip-65535")] __read_mostly;
478 1296
/* Per-class expectation policies: a single long-lived signalling
 * expectation, and up to 2 * IP_CT_DIR_MAX expectations per media class
 * (presumably RTP + RTCP for each direction — confirm against
 * set_expected_rtp_rtcp). All classes time out after 3 minutes. */
static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = {
	[SIP_EXPECT_SIGNALLING] = {
		.max_expected = 1,
		.timeout = 3 * 60,
	},
	[SIP_EXPECT_AUDIO] = {
		.max_expected = 2 * IP_CT_DIR_MAX,
		.timeout = 3 * 60,
	},
	[SIP_EXPECT_VIDEO] = {
		.max_expected = 2 * IP_CT_DIR_MAX,
		.timeout = 3 * 60,
	},
};
1311
479static void nf_conntrack_sip_fini(void) 1312static void nf_conntrack_sip_fini(void)
480{ 1313{
481 int i, j; 1314 int i, j;
@@ -505,8 +1338,8 @@ static int __init nf_conntrack_sip_init(void)
505 for (j = 0; j < 2; j++) { 1338 for (j = 0; j < 2; j++) {
506 sip[i][j].tuple.dst.protonum = IPPROTO_UDP; 1339 sip[i][j].tuple.dst.protonum = IPPROTO_UDP;
507 sip[i][j].tuple.src.u.udp.port = htons(ports[i]); 1340 sip[i][j].tuple.src.u.udp.port = htons(ports[i]);
508 sip[i][j].max_expected = 2; 1341 sip[i][j].expect_policy = sip_exp_policy;
509 sip[i][j].timeout = 3 * 60; /* 3 minutes */ 1342 sip[i][j].expect_class_max = SIP_EXPECT_MAX;
510 sip[i][j].me = THIS_MODULE; 1343 sip[i][j].me = THIS_MODULE;
511 sip[i][j].help = sip_help; 1344 sip[i][j].help = sip_help;
512 1345
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 8599068050ec..b59871f6bdda 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -127,21 +127,14 @@ static int ct_seq_show(struct seq_file *s, void *v)
127 if (NF_CT_DIRECTION(hash)) 127 if (NF_CT_DIRECTION(hash))
128 return 0; 128 return 0;
129 129
130 l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL] 130 l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
131 .tuple.src.l3num);
132
133 NF_CT_ASSERT(l3proto); 131 NF_CT_ASSERT(l3proto);
134 l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL] 132 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
135 .tuple.src.l3num,
136 ct->tuplehash[IP_CT_DIR_ORIGINAL]
137 .tuple.dst.protonum);
138 NF_CT_ASSERT(l4proto); 133 NF_CT_ASSERT(l4proto);
139 134
140 if (seq_printf(s, "%-8s %u %-8s %u %ld ", 135 if (seq_printf(s, "%-8s %u %-8s %u %ld ",
141 l3proto->name, 136 l3proto->name, nf_ct_l3num(ct),
142 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num, 137 l4proto->name, nf_ct_protonum(ct),
143 l4proto->name,
144 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum,
145 timer_pending(&ct->timeout) 138 timer_pending(&ct->timeout)
146 ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0) 139 ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
147 return -ENOSPC; 140 return -ENOSPC;
@@ -293,8 +286,43 @@ static const struct file_operations ct_cpu_seq_fops = {
293 .open = ct_cpu_seq_open, 286 .open = ct_cpu_seq_open,
294 .read = seq_read, 287 .read = seq_read,
295 .llseek = seq_lseek, 288 .llseek = seq_lseek,
296 .release = seq_release_private, 289 .release = seq_release,
297}; 290};
291
/* Create the conntrack /proc entries: /proc/net/nf_conntrack and the
 * per-CPU statistics file /proc/net/stat/nf_conntrack. Returns 0 on
 * success or -ENOMEM, undoing partial creation on failure. */
static int nf_conntrack_standalone_init_proc(void)
{
	struct proc_dir_entry *pde;

	pde = proc_net_fops_create(&init_net, "nf_conntrack", 0440, &ct_file_ops);
	if (!pde)
		goto out_nf_conntrack;
	pde = create_proc_entry("nf_conntrack", S_IRUGO, init_net.proc_net_stat);
	if (!pde)
		goto out_stat_nf_conntrack;
	pde->proc_fops = &ct_cpu_seq_fops;
	pde->owner = THIS_MODULE;
	return 0;

out_stat_nf_conntrack:
	/* Roll back the first entry if the second could not be created. */
	proc_net_remove(&init_net, "nf_conntrack");
out_nf_conntrack:
	return -ENOMEM;
}
311
/* Remove the /proc entries, in reverse order of their creation. */
static void nf_conntrack_standalone_fini_proc(void)
{
	remove_proc_entry("nf_conntrack", init_net.proc_net_stat);
	proc_net_remove(&init_net, "nf_conntrack");
}
317#else
318static int nf_conntrack_standalone_init_proc(void)
319{
320 return 0;
321}
322
323static void nf_conntrack_standalone_fini_proc(void)
324{
325}
298#endif /* CONFIG_PROC_FS */ 326#endif /* CONFIG_PROC_FS */
299 327
300/* Sysctl support */ 328/* Sysctl support */
@@ -390,60 +418,61 @@ static struct ctl_path nf_ct_path[] = {
390}; 418};
391 419
392EXPORT_SYMBOL_GPL(nf_ct_log_invalid); 420EXPORT_SYMBOL_GPL(nf_ct_log_invalid);
421
422static int nf_conntrack_standalone_init_sysctl(void)
423{
424 nf_ct_sysctl_header =
425 register_sysctl_paths(nf_ct_path, nf_ct_netfilter_table);
426 if (nf_ct_sysctl_header == NULL) {
427 printk("nf_conntrack: can't register to sysctl.\n");
428 return -ENOMEM;
429 }
430 return 0;
431
432}
433
/* Unregister the sysctl table registered at init time. */
static void nf_conntrack_standalone_fini_sysctl(void)
{
	unregister_sysctl_table(nf_ct_sysctl_header);
}
438#else
439static int nf_conntrack_standalone_init_sysctl(void)
440{
441 return 0;
442}
443
444static void nf_conntrack_standalone_fini_sysctl(void)
445{
446}
393#endif /* CONFIG_SYSCTL */ 447#endif /* CONFIG_SYSCTL */
394 448
395static int __init nf_conntrack_standalone_init(void) 449static int __init nf_conntrack_standalone_init(void)
396{ 450{
397#ifdef CONFIG_PROC_FS 451 int ret;
398 struct proc_dir_entry *proc;
399#endif
400 int ret = 0;
401 452
402 ret = nf_conntrack_init(); 453 ret = nf_conntrack_init();
403 if (ret < 0) 454 if (ret < 0)
404 return ret; 455 goto out;
405 456 ret = nf_conntrack_standalone_init_proc();
406#ifdef CONFIG_PROC_FS 457 if (ret < 0)
407 proc = proc_net_fops_create(&init_net, "nf_conntrack", 0440, &ct_file_ops); 458 goto out_proc;
408 if (!proc) goto cleanup_init; 459 ret = nf_conntrack_standalone_init_sysctl();
409 460 if (ret < 0)
410 if (!proc_create("nf_conntrack", S_IRUGO, 461 goto out_sysctl;
411 init_net.proc_net_stat, &ct_cpu_seq_fops)) 462 return 0;
412 goto cleanup_proc;
413#endif
414#ifdef CONFIG_SYSCTL
415 nf_ct_sysctl_header = register_sysctl_paths(nf_ct_path,
416 nf_ct_netfilter_table);
417 if (nf_ct_sysctl_header == NULL) {
418 printk("nf_conntrack: can't register to sysctl.\n");
419 ret = -ENOMEM;
420 goto cleanup_proc_stat;
421 }
422#endif
423 return ret;
424 463
425#ifdef CONFIG_SYSCTL 464out_sysctl:
426 cleanup_proc_stat: 465 nf_conntrack_standalone_fini_proc();
427#endif 466out_proc:
428#ifdef CONFIG_PROC_FS
429 remove_proc_entry("nf_conntrack", init_net. proc_net_stat);
430 cleanup_proc:
431 proc_net_remove(&init_net, "nf_conntrack");
432 cleanup_init:
433#endif /* CNFIG_PROC_FS */
434 nf_conntrack_cleanup(); 467 nf_conntrack_cleanup();
468out:
435 return ret; 469 return ret;
436} 470}
437 471
438static void __exit nf_conntrack_standalone_fini(void) 472static void __exit nf_conntrack_standalone_fini(void)
439{ 473{
440#ifdef CONFIG_SYSCTL 474 nf_conntrack_standalone_fini_sysctl();
441 unregister_sysctl_table(nf_ct_sysctl_header); 475 nf_conntrack_standalone_fini_proc();
442#endif
443#ifdef CONFIG_PROC_FS
444 remove_proc_entry("nf_conntrack", init_net.proc_net_stat);
445 proc_net_remove(&init_net, "nf_conntrack");
446#endif /* CNFIG_PROC_FS */
447 nf_conntrack_cleanup(); 476 nf_conntrack_cleanup();
448} 477}
449 478
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c
index bd2e800f23cc..f57f6e7a71ee 100644
--- a/net/netfilter/nf_conntrack_tftp.c
+++ b/net/netfilter/nf_conntrack_tftp.c
@@ -44,7 +44,6 @@ static int tftp_help(struct sk_buff *skb,
44 struct nf_conntrack_expect *exp; 44 struct nf_conntrack_expect *exp;
45 struct nf_conntrack_tuple *tuple; 45 struct nf_conntrack_tuple *tuple;
46 unsigned int ret = NF_ACCEPT; 46 unsigned int ret = NF_ACCEPT;
47 int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
48 typeof(nf_nat_tftp_hook) nf_nat_tftp; 47 typeof(nf_nat_tftp_hook) nf_nat_tftp;
49 48
50 tfh = skb_header_pointer(skb, protoff + sizeof(struct udphdr), 49 tfh = skb_header_pointer(skb, protoff + sizeof(struct udphdr),
@@ -56,18 +55,20 @@ static int tftp_help(struct sk_buff *skb,
56 case TFTP_OPCODE_READ: 55 case TFTP_OPCODE_READ:
57 case TFTP_OPCODE_WRITE: 56 case TFTP_OPCODE_WRITE:
58 /* RRQ and WRQ works the same way */ 57 /* RRQ and WRQ works the same way */
59 NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 58 nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
60 NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 59 nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
61 60
62 exp = nf_ct_expect_alloc(ct); 61 exp = nf_ct_expect_alloc(ct);
63 if (exp == NULL) 62 if (exp == NULL)
64 return NF_DROP; 63 return NF_DROP;
65 tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; 64 tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
66 nf_ct_expect_init(exp, family, &tuple->src.u3, &tuple->dst.u3, 65 nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
66 nf_ct_l3num(ct),
67 &tuple->src.u3, &tuple->dst.u3,
67 IPPROTO_UDP, NULL, &tuple->dst.u.udp.port); 68 IPPROTO_UDP, NULL, &tuple->dst.u.udp.port);
68 69
69 pr_debug("expect: "); 70 pr_debug("expect: ");
70 NF_CT_DUMP_TUPLE(&exp->tuple); 71 nf_ct_dump_tuple(&exp->tuple);
71 72
72 nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook); 73 nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook);
73 if (nf_nat_tftp && ct->status & IPS_NAT_MASK) 74 if (nf_nat_tftp && ct->status & IPS_NAT_MASK)
@@ -92,6 +93,11 @@ static int tftp_help(struct sk_buff *skb,
92static struct nf_conntrack_helper tftp[MAX_PORTS][2] __read_mostly; 93static struct nf_conntrack_helper tftp[MAX_PORTS][2] __read_mostly;
93static char tftp_names[MAX_PORTS][2][sizeof("tftp-65535")] __read_mostly; 94static char tftp_names[MAX_PORTS][2][sizeof("tftp-65535")] __read_mostly;
94 95
96static const struct nf_conntrack_expect_policy tftp_exp_policy = {
97 .max_expected = 1,
98 .timeout = 5 * 60,
99};
100
95static void nf_conntrack_tftp_fini(void) 101static void nf_conntrack_tftp_fini(void)
96{ 102{
97 int i, j; 103 int i, j;
@@ -118,8 +124,7 @@ static int __init nf_conntrack_tftp_init(void)
118 for (j = 0; j < 2; j++) { 124 for (j = 0; j < 2; j++) {
119 tftp[i][j].tuple.dst.protonum = IPPROTO_UDP; 125 tftp[i][j].tuple.dst.protonum = IPPROTO_UDP;
120 tftp[i][j].tuple.src.u.udp.port = htons(ports[i]); 126 tftp[i][j].tuple.src.u.udp.port = htons(ports[i]);
121 tftp[i][j].max_expected = 1; 127 tftp[i][j].expect_policy = &tftp_exp_policy;
122 tftp[i][j].timeout = 5 * 60; /* 5 minutes */
123 tftp[i][j].me = THIS_MODULE; 128 tftp[i][j].me = THIS_MODULE;
124 tftp[i][j].help = tftp_help; 129 tftp[i][j].help = tftp_help;
125 130
diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
index 3dd4b3c76d81..69d699f95f4c 100644
--- a/net/netfilter/nf_sockopt.c
+++ b/net/netfilter/nf_sockopt.c
@@ -65,7 +65,7 @@ static struct nf_sockopt_ops *nf_sockopt_find(struct sock *sk, int pf,
65{ 65{
66 struct nf_sockopt_ops *ops; 66 struct nf_sockopt_ops *ops;
67 67
68 if (sk->sk_net != &init_net) 68 if (sock_net(sk) != &init_net)
69 return ERR_PTR(-ENOPROTOOPT); 69 return ERR_PTR(-ENOPROTOOPT);
70 70
71 if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) 71 if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 10522c04ed24..2c9fe5c12894 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -557,7 +557,7 @@ nfqnl_rcv_dev_event(struct notifier_block *this,
557{ 557{
558 struct net_device *dev = ptr; 558 struct net_device *dev = ptr;
559 559
560 if (dev->nd_net != &init_net) 560 if (dev_net(dev) != &init_net)
561 return NOTIFY_DONE; 561 return NOTIFY_DONE;
562 562
563 /* Drop any packets associated with the downed device */ 563 /* Drop any packets associated with the downed device */
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index a6792089fcf9..f52f7f810ac4 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -58,7 +58,7 @@ static struct xt_af *xt;
58#define duprintf(format, args...) 58#define duprintf(format, args...)
59#endif 59#endif
60 60
61static const char *xt_prefix[NPROTO] = { 61static const char *const xt_prefix[NPROTO] = {
62 [AF_INET] = "ip", 62 [AF_INET] = "ip",
63 [AF_INET6] = "ip6", 63 [AF_INET6] = "ip6",
64 [NF_ARP] = "arp", 64 [NF_ARP] = "arp",
@@ -248,7 +248,7 @@ EXPORT_SYMBOL_GPL(xt_request_find_target);
248 248
249static int match_revfn(int af, const char *name, u8 revision, int *bestp) 249static int match_revfn(int af, const char *name, u8 revision, int *bestp)
250{ 250{
251 struct xt_match *m; 251 const struct xt_match *m;
252 int have_rev = 0; 252 int have_rev = 0;
253 253
254 list_for_each_entry(m, &xt[af].match, list) { 254 list_for_each_entry(m, &xt[af].match, list) {
@@ -264,7 +264,7 @@ static int match_revfn(int af, const char *name, u8 revision, int *bestp)
264 264
265static int target_revfn(int af, const char *name, u8 revision, int *bestp) 265static int target_revfn(int af, const char *name, u8 revision, int *bestp)
266{ 266{
267 struct xt_target *t; 267 const struct xt_target *t;
268 int have_rev = 0; 268 int have_rev = 0;
269 269
270 list_for_each_entry(t, &xt[af].target, list) { 270 list_for_each_entry(t, &xt[af].target, list) {
@@ -385,7 +385,7 @@ short xt_compat_calc_jump(int af, unsigned int offset)
385} 385}
386EXPORT_SYMBOL_GPL(xt_compat_calc_jump); 386EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
387 387
388int xt_compat_match_offset(struct xt_match *match) 388int xt_compat_match_offset(const struct xt_match *match)
389{ 389{
390 u_int16_t csize = match->compatsize ? : match->matchsize; 390 u_int16_t csize = match->compatsize ? : match->matchsize;
391 return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize); 391 return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(xt_compat_match_offset);
395int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, 395int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
396 unsigned int *size) 396 unsigned int *size)
397{ 397{
398 struct xt_match *match = m->u.kernel.match; 398 const struct xt_match *match = m->u.kernel.match;
399 struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m; 399 struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
400 int pad, off = xt_compat_match_offset(match); 400 int pad, off = xt_compat_match_offset(match);
401 u_int16_t msize = cm->u.user.match_size; 401 u_int16_t msize = cm->u.user.match_size;
@@ -422,7 +422,7 @@ EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
422int xt_compat_match_to_user(struct xt_entry_match *m, void __user **dstptr, 422int xt_compat_match_to_user(struct xt_entry_match *m, void __user **dstptr,
423 unsigned int *size) 423 unsigned int *size)
424{ 424{
425 struct xt_match *match = m->u.kernel.match; 425 const struct xt_match *match = m->u.kernel.match;
426 struct compat_xt_entry_match __user *cm = *dstptr; 426 struct compat_xt_entry_match __user *cm = *dstptr;
427 int off = xt_compat_match_offset(match); 427 int off = xt_compat_match_offset(match);
428 u_int16_t msize = m->u.user.match_size - off; 428 u_int16_t msize = m->u.user.match_size - off;
@@ -479,7 +479,7 @@ int xt_check_target(const struct xt_target *target, unsigned short family,
479EXPORT_SYMBOL_GPL(xt_check_target); 479EXPORT_SYMBOL_GPL(xt_check_target);
480 480
481#ifdef CONFIG_COMPAT 481#ifdef CONFIG_COMPAT
482int xt_compat_target_offset(struct xt_target *target) 482int xt_compat_target_offset(const struct xt_target *target)
483{ 483{
484 u_int16_t csize = target->compatsize ? : target->targetsize; 484 u_int16_t csize = target->compatsize ? : target->targetsize;
485 return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize); 485 return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
@@ -489,7 +489,7 @@ EXPORT_SYMBOL_GPL(xt_compat_target_offset);
489void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, 489void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
490 unsigned int *size) 490 unsigned int *size)
491{ 491{
492 struct xt_target *target = t->u.kernel.target; 492 const struct xt_target *target = t->u.kernel.target;
493 struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t; 493 struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
494 int pad, off = xt_compat_target_offset(target); 494 int pad, off = xt_compat_target_offset(target);
495 u_int16_t tsize = ct->u.user.target_size; 495 u_int16_t tsize = ct->u.user.target_size;
@@ -515,7 +515,7 @@ EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
515int xt_compat_target_to_user(struct xt_entry_target *t, void __user **dstptr, 515int xt_compat_target_to_user(struct xt_entry_target *t, void __user **dstptr,
516 unsigned int *size) 516 unsigned int *size)
517{ 517{
518 struct xt_target *target = t->u.kernel.target; 518 const struct xt_target *target = t->u.kernel.target;
519 struct compat_xt_entry_target __user *ct = *dstptr; 519 struct compat_xt_entry_target __user *ct = *dstptr;
520 int off = xt_compat_target_offset(target); 520 int off = xt_compat_target_offset(target);
521 u_int16_t tsize = t->u.user.target_size - off; 521 u_int16_t tsize = t->u.user.target_size - off;
@@ -727,7 +727,7 @@ struct xt_names_priv {
727static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos) 727static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
728{ 728{
729 struct xt_names_priv *priv = seq->private; 729 struct xt_names_priv *priv = seq->private;
730 struct net *net = priv->p.net; 730 struct net *net = seq_file_net(seq);
731 int af = priv->af; 731 int af = priv->af;
732 732
733 mutex_lock(&xt[af].mutex); 733 mutex_lock(&xt[af].mutex);
@@ -737,7 +737,7 @@ static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
737static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos) 737static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
738{ 738{
739 struct xt_names_priv *priv = seq->private; 739 struct xt_names_priv *priv = seq->private;
740 struct net *net = priv->p.net; 740 struct net *net = seq_file_net(seq);
741 int af = priv->af; 741 int af = priv->af;
742 742
743 return seq_list_next(v, &net->xt.tables[af], pos); 743 return seq_list_next(v, &net->xt.tables[af], pos);
diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c
index 1faa9136195d..211189eb2b67 100644
--- a/net/netfilter/xt_CONNSECMARK.c
+++ b/net/netfilter/xt_CONNSECMARK.c
@@ -55,7 +55,7 @@ static void secmark_save(const struct sk_buff *skb)
55static void secmark_restore(struct sk_buff *skb) 55static void secmark_restore(struct sk_buff *skb)
56{ 56{
57 if (!skb->secmark) { 57 if (!skb->secmark) {
58 struct nf_conn *ct; 58 const struct nf_conn *ct;
59 enum ip_conntrack_info ctinfo; 59 enum ip_conntrack_info ctinfo;
60 60
61 ct = nf_ct_get(skb, &ctinfo); 61 ct = nf_ct_get(skb, &ctinfo);
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 24c73ba31eaa..64d6ad380293 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -96,7 +96,7 @@ xt_rateest_tg_checkentry(const char *tablename,
96 void *targinfo, 96 void *targinfo,
97 unsigned int hook_mask) 97 unsigned int hook_mask)
98{ 98{
99 struct xt_rateest_target_info *info = (void *)targinfo; 99 struct xt_rateest_target_info *info = targinfo;
100 struct xt_rateest *est; 100 struct xt_rateest *est;
101 struct { 101 struct {
102 struct nlattr opt; 102 struct nlattr opt;
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 3b0111933f60..2e89a00df92c 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -72,9 +72,7 @@ connlimit_iphash6(const union nf_inet_addr *addr,
72 72
73static inline bool already_closed(const struct nf_conn *conn) 73static inline bool already_closed(const struct nf_conn *conn)
74{ 74{
75 u_int16_t proto = conn->tuplehash[0].tuple.dst.protonum; 75 if (nf_ct_protonum(conn) == IPPROTO_TCP)
76
77 if (proto == IPPROTO_TCP)
78 return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT; 76 return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT;
79 else 77 else
80 return 0; 78 return 0;
@@ -106,10 +104,10 @@ static int count_them(struct xt_connlimit_data *data,
106 const union nf_inet_addr *mask, 104 const union nf_inet_addr *mask,
107 const struct xt_match *match) 105 const struct xt_match *match)
108{ 106{
109 struct nf_conntrack_tuple_hash *found; 107 const struct nf_conntrack_tuple_hash *found;
110 struct xt_connlimit_conn *conn; 108 struct xt_connlimit_conn *conn;
111 struct xt_connlimit_conn *tmp; 109 struct xt_connlimit_conn *tmp;
112 struct nf_conn *found_ct; 110 const struct nf_conn *found_ct;
113 struct list_head *hash; 111 struct list_head *hash;
114 bool addit = true; 112 bool addit = true;
115 int matches = 0; 113 int matches = 0;
@@ -256,7 +254,7 @@ connlimit_mt_check(const char *tablename, const void *ip,
256static void 254static void
257connlimit_mt_destroy(const struct xt_match *match, void *matchinfo) 255connlimit_mt_destroy(const struct xt_match *match, void *matchinfo)
258{ 256{
259 struct xt_connlimit_info *info = matchinfo; 257 const struct xt_connlimit_info *info = matchinfo;
260 struct xt_connlimit_conn *conn; 258 struct xt_connlimit_conn *conn;
261 struct xt_connlimit_conn *tmp; 259 struct xt_connlimit_conn *tmp;
262 struct list_head *hash = info->data->iphash; 260 struct list_head *hash = info->data->iphash;
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 0c50b2894055..d61412f58ef7 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -65,7 +65,7 @@ conntrack_mt_v0(const struct sk_buff *skb, const struct net_device *in,
65 } 65 }
66 66
67 if (sinfo->flags & XT_CONNTRACK_PROTO && 67 if (sinfo->flags & XT_CONNTRACK_PROTO &&
68 FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum != 68 FWINV(nf_ct_protonum(ct) !=
69 sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum, 69 sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum,
70 XT_CONNTRACK_PROTO)) 70 XT_CONNTRACK_PROTO))
71 return false; 71 return false;
@@ -174,7 +174,7 @@ ct_proto_port_check(const struct xt_conntrack_mtinfo1 *info,
174 174
175 tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; 175 tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
176 if ((info->match_flags & XT_CONNTRACK_PROTO) && 176 if ((info->match_flags & XT_CONNTRACK_PROTO) &&
177 (tuple->dst.protonum == info->l4proto) ^ 177 (nf_ct_protonum(ct) == info->l4proto) ^
178 !(info->invert_flags & XT_CONNTRACK_PROTO)) 178 !(info->invert_flags & XT_CONNTRACK_PROTO))
179 return false; 179 return false;
180 180
diff --git a/net/netfilter/xt_dccp.c b/net/netfilter/xt_dccp.c
index 667f45e72cd9..8b6522186d9f 100644
--- a/net/netfilter/xt_dccp.c
+++ b/net/netfilter/xt_dccp.c
@@ -98,7 +98,8 @@ dccp_mt(const struct sk_buff *skb, const struct net_device *in,
98 const void *matchinfo, int offset, unsigned int protoff, bool *hotdrop) 98 const void *matchinfo, int offset, unsigned int protoff, bool *hotdrop)
99{ 99{
100 const struct xt_dccp_info *info = matchinfo; 100 const struct xt_dccp_info *info = matchinfo;
101 struct dccp_hdr _dh, *dh; 101 const struct dccp_hdr *dh;
102 struct dccp_hdr _dh;
102 103
103 if (offset) 104 if (offset)
104 return false; 105 return false;
diff --git a/net/netfilter/xt_esp.c b/net/netfilter/xt_esp.c
index 71c7c3785266..a133eb9b23e1 100644
--- a/net/netfilter/xt_esp.c
+++ b/net/netfilter/xt_esp.c
@@ -47,7 +47,8 @@ esp_mt(const struct sk_buff *skb, const struct net_device *in,
47 const struct net_device *out, const struct xt_match *match, 47 const struct net_device *out, const struct xt_match *match,
48 const void *matchinfo, int offset, unsigned int protoff, bool *hotdrop) 48 const void *matchinfo, int offset, unsigned int protoff, bool *hotdrop)
49{ 49{
50 struct ip_esp_hdr _esp, *eh; 50 const struct ip_esp_hdr *eh;
51 struct ip_esp_hdr _esp;
51 const struct xt_esp *espinfo = matchinfo; 52 const struct xt_esp *espinfo = matchinfo;
52 53
53 /* Must not be a fragment. */ 54 /* Must not be a fragment. */
diff --git a/net/netfilter/xt_multiport.c b/net/netfilter/xt_multiport.c
index 31daa8192422..fd88c489b70e 100644
--- a/net/netfilter/xt_multiport.c
+++ b/net/netfilter/xt_multiport.c
@@ -100,7 +100,8 @@ multiport_mt_v0(const struct sk_buff *skb, const struct net_device *in,
100 const void *matchinfo, int offset, unsigned int protoff, 100 const void *matchinfo, int offset, unsigned int protoff,
101 bool *hotdrop) 101 bool *hotdrop)
102{ 102{
103 __be16 _ports[2], *pptr; 103 const __be16 *pptr;
104 __be16 _ports[2];
104 const struct xt_multiport *multiinfo = matchinfo; 105 const struct xt_multiport *multiinfo = matchinfo;
105 106
106 if (offset) 107 if (offset)
@@ -126,7 +127,8 @@ multiport_mt(const struct sk_buff *skb, const struct net_device *in,
126 const void *matchinfo, int offset, unsigned int protoff, 127 const void *matchinfo, int offset, unsigned int protoff,
127 bool *hotdrop) 128 bool *hotdrop)
128{ 129{
129 __be16 _ports[2], *pptr; 130 const __be16 *pptr;
131 __be16 _ports[2];
130 const struct xt_multiport_v1 *multiinfo = matchinfo; 132 const struct xt_multiport_v1 *multiinfo = matchinfo;
131 133
132 if (offset) 134 if (offset)
diff --git a/net/netfilter/xt_policy.c b/net/netfilter/xt_policy.c
index 9e918add2282..d351582b2a3d 100644
--- a/net/netfilter/xt_policy.c
+++ b/net/netfilter/xt_policy.c
@@ -136,7 +136,7 @@ policy_mt_check(const char *tablename, const void *ip_void,
136 const struct xt_match *match, void *matchinfo, 136 const struct xt_match *match, void *matchinfo,
137 unsigned int hook_mask) 137 unsigned int hook_mask)
138{ 138{
139 struct xt_policy_info *info = matchinfo; 139 const struct xt_policy_info *info = matchinfo;
140 140
141 if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) { 141 if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) {
142 printk(KERN_ERR "xt_policy: neither incoming nor " 142 printk(KERN_ERR "xt_policy: neither incoming nor "
diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c
index fdb86a515146..ebd84f1b4f62 100644
--- a/net/netfilter/xt_rateest.c
+++ b/net/netfilter/xt_rateest.c
@@ -86,7 +86,7 @@ static bool xt_rateest_mt_checkentry(const char *tablename,
86 void *matchinfo, 86 void *matchinfo,
87 unsigned int hook_mask) 87 unsigned int hook_mask)
88{ 88{
89 struct xt_rateest_match_info *info = (void *)matchinfo; 89 struct xt_rateest_match_info *info = matchinfo;
90 struct xt_rateest *est1, *est2; 90 struct xt_rateest *est1, *est2;
91 91
92 if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS | 92 if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS |
@@ -130,7 +130,7 @@ err1:
130static void xt_rateest_mt_destroy(const struct xt_match *match, 130static void xt_rateest_mt_destroy(const struct xt_match *match,
131 void *matchinfo) 131 void *matchinfo)
132{ 132{
133 struct xt_rateest_match_info *info = (void *)matchinfo; 133 struct xt_rateest_match_info *info = matchinfo;
134 134
135 xt_rateest_put(info->est1); 135 xt_rateest_put(info->est1);
136 if (info->est2) 136 if (info->est2)
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
index b718ec64333d..e6e4681fa047 100644
--- a/net/netfilter/xt_sctp.c
+++ b/net/netfilter/xt_sctp.c
@@ -46,7 +46,8 @@ match_packet(const struct sk_buff *skb,
46 bool *hotdrop) 46 bool *hotdrop)
47{ 47{
48 u_int32_t chunkmapcopy[256 / sizeof (u_int32_t)]; 48 u_int32_t chunkmapcopy[256 / sizeof (u_int32_t)];
49 sctp_chunkhdr_t _sch, *sch; 49 const sctp_chunkhdr_t *sch;
50 sctp_chunkhdr_t _sch;
50 int chunk_match_type = info->chunk_match_type; 51 int chunk_match_type = info->chunk_match_type;
51 const struct xt_sctp_flag_info *flag_info = info->flag_info; 52 const struct xt_sctp_flag_info *flag_info = info->flag_info;
52 int flag_count = info->flag_count; 53 int flag_count = info->flag_count;
@@ -121,7 +122,8 @@ sctp_mt(const struct sk_buff *skb, const struct net_device *in,
121 const void *matchinfo, int offset, unsigned int protoff, bool *hotdrop) 122 const void *matchinfo, int offset, unsigned int protoff, bool *hotdrop)
122{ 123{
123 const struct xt_sctp_info *info = matchinfo; 124 const struct xt_sctp_info *info = matchinfo;
124 sctp_sctphdr_t _sh, *sh; 125 const sctp_sctphdr_t *sh;
126 sctp_sctphdr_t _sh;
125 127
126 if (offset) { 128 if (offset) {
127 duprintf("Dropping non-first fragment.. FIXME\n"); 129 duprintf("Dropping non-first fragment.. FIXME\n");
diff --git a/net/netfilter/xt_tcpmss.c b/net/netfilter/xt_tcpmss.c
index d7a5b27fe81e..6771bf01275b 100644
--- a/net/netfilter/xt_tcpmss.c
+++ b/net/netfilter/xt_tcpmss.c
@@ -31,9 +31,11 @@ tcpmss_mt(const struct sk_buff *skb, const struct net_device *in,
31 bool *hotdrop) 31 bool *hotdrop)
32{ 32{
33 const struct xt_tcpmss_match_info *info = matchinfo; 33 const struct xt_tcpmss_match_info *info = matchinfo;
34 struct tcphdr _tcph, *th; 34 const struct tcphdr *th;
35 struct tcphdr _tcph;
35 /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */ 36 /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
36 u8 _opt[15 * 4 - sizeof(_tcph)], *op; 37 const u_int8_t *op;
38 u8 _opt[15 * 4 - sizeof(_tcph)];
37 unsigned int i, optlen; 39 unsigned int i, optlen;
38 40
39 /* If we don't have the whole header, drop packet. */ 41 /* If we don't have the whole header, drop packet. */
diff --git a/net/netfilter/xt_tcpudp.c b/net/netfilter/xt_tcpudp.c
index 4fa3b669f691..951b06b8d701 100644
--- a/net/netfilter/xt_tcpudp.c
+++ b/net/netfilter/xt_tcpudp.c
@@ -42,7 +42,8 @@ tcp_find_option(u_int8_t option,
42 bool *hotdrop) 42 bool *hotdrop)
43{ 43{
44 /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */ 44 /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
45 u_int8_t _opt[60 - sizeof(struct tcphdr)], *op; 45 const u_int8_t *op;
46 u_int8_t _opt[60 - sizeof(struct tcphdr)];
46 unsigned int i; 47 unsigned int i;
47 48
48 duprintf("tcp_match: finding option\n"); 49 duprintf("tcp_match: finding option\n");
@@ -72,7 +73,8 @@ tcp_mt(const struct sk_buff *skb, const struct net_device *in,
72 const struct net_device *out, const struct xt_match *match, 73 const struct net_device *out, const struct xt_match *match,
73 const void *matchinfo, int offset, unsigned int protoff, bool *hotdrop) 74 const void *matchinfo, int offset, unsigned int protoff, bool *hotdrop)
74{ 75{
75 struct tcphdr _tcph, *th; 76 const struct tcphdr *th;
77 struct tcphdr _tcph;
76 const struct xt_tcp *tcpinfo = matchinfo; 78 const struct xt_tcp *tcpinfo = matchinfo;
77 79
78 if (offset) { 80 if (offset) {
@@ -144,7 +146,8 @@ udp_mt(const struct sk_buff *skb, const struct net_device *in,
144 const struct net_device *out, const struct xt_match *match, 146 const struct net_device *out, const struct xt_match *match,
145 const void *matchinfo, int offset, unsigned int protoff, bool *hotdrop) 147 const void *matchinfo, int offset, unsigned int protoff, bool *hotdrop)
146{ 148{
147 struct udphdr _udph, *uh; 149 const struct udphdr *uh;
150 struct udphdr _udph;
148 const struct xt_udp *udpinfo = matchinfo; 151 const struct xt_udp *udpinfo = matchinfo;
149 152
150 /* Must not be a fragment. */ 153 /* Must not be a fragment. */
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index 9fa2e0824708..ed76baab4734 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -223,7 +223,7 @@ time_mt_check(const char *tablename, const void *ip,
223 const struct xt_match *match, void *matchinfo, 223 const struct xt_match *match, void *matchinfo,
224 unsigned int hook_mask) 224 unsigned int hook_mask)
225{ 225{
226 struct xt_time_info *info = matchinfo; 226 const struct xt_time_info *info = matchinfo;
227 227
228 if (info->daytime_start > XT_TIME_MAX_DAYTIME || 228 if (info->daytime_start > XT_TIME_MAX_DAYTIME ||
229 info->daytime_stop > XT_TIME_MAX_DAYTIME) { 229 info->daytime_stop > XT_TIME_MAX_DAYTIME) {
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 4478f2f6079d..d282ad1570a7 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -954,7 +954,7 @@ static int netlbl_unlhsh_netdev_handler(struct notifier_block *this,
954 struct net_device *dev = ptr; 954 struct net_device *dev = ptr;
955 struct netlbl_unlhsh_iface *iface = NULL; 955 struct netlbl_unlhsh_iface *iface = NULL;
956 956
957 if (dev->nd_net != &init_net) 957 if (dev_net(dev) != &init_net)
958 return NOTIFY_DONE; 958 return NOTIFY_DONE;
959 959
960 /* XXX - should this be a check for NETDEV_DOWN or _UNREGISTER? */ 960 /* XXX - should this be a check for NETDEV_DOWN or _UNREGISTER? */
@@ -1339,6 +1339,10 @@ static int netlbl_unlabel_staticlist_gen(u32 cmd,
1339 1339
1340 if (iface->ifindex > 0) { 1340 if (iface->ifindex > 0) {
1341 dev = dev_get_by_index(&init_net, iface->ifindex); 1341 dev = dev_get_by_index(&init_net, iface->ifindex);
1342 if (!dev) {
1343 ret_val = -ENODEV;
1344 goto list_cb_failure;
1345 }
1342 ret_val = nla_put_string(cb_arg->skb, 1346 ret_val = nla_put_string(cb_arg->skb,
1343 NLBL_UNLABEL_A_IFACE, dev->name); 1347 NLBL_UNLABEL_A_IFACE, dev->name);
1344 dev_put(dev); 1348 dev_put(dev);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1ab0da2632e1..36f75d873898 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -228,7 +228,7 @@ static inline struct sock *netlink_lookup(struct net *net, int protocol,
228 read_lock(&nl_table_lock); 228 read_lock(&nl_table_lock);
229 head = nl_pid_hashfn(hash, pid); 229 head = nl_pid_hashfn(hash, pid);
230 sk_for_each(sk, node, head) { 230 sk_for_each(sk, node, head) {
231 if ((sk->sk_net == net) && (nlk_sk(sk)->pid == pid)) { 231 if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->pid == pid)) {
232 sock_hold(sk); 232 sock_hold(sk);
233 goto found; 233 goto found;
234 } 234 }
@@ -348,7 +348,7 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
348 head = nl_pid_hashfn(hash, pid); 348 head = nl_pid_hashfn(hash, pid);
349 len = 0; 349 len = 0;
350 sk_for_each(osk, node, head) { 350 sk_for_each(osk, node, head) {
351 if ((osk->sk_net == net) && (nlk_sk(osk)->pid == pid)) 351 if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->pid == pid))
352 break; 352 break;
353 len++; 353 len++;
354 } 354 }
@@ -486,7 +486,7 @@ static int netlink_release(struct socket *sock)
486 486
487 if (nlk->pid && !nlk->subscriptions) { 487 if (nlk->pid && !nlk->subscriptions) {
488 struct netlink_notify n = { 488 struct netlink_notify n = {
489 .net = sk->sk_net, 489 .net = sock_net(sk),
490 .protocol = sk->sk_protocol, 490 .protocol = sk->sk_protocol,
491 .pid = nlk->pid, 491 .pid = nlk->pid,
492 }; 492 };
@@ -518,7 +518,7 @@ static int netlink_release(struct socket *sock)
518static int netlink_autobind(struct socket *sock) 518static int netlink_autobind(struct socket *sock)
519{ 519{
520 struct sock *sk = sock->sk; 520 struct sock *sk = sock->sk;
521 struct net *net = sk->sk_net; 521 struct net *net = sock_net(sk);
522 struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash; 522 struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
523 struct hlist_head *head; 523 struct hlist_head *head;
524 struct sock *osk; 524 struct sock *osk;
@@ -532,7 +532,7 @@ retry:
532 netlink_table_grab(); 532 netlink_table_grab();
533 head = nl_pid_hashfn(hash, pid); 533 head = nl_pid_hashfn(hash, pid);
534 sk_for_each(osk, node, head) { 534 sk_for_each(osk, node, head) {
535 if ((osk->sk_net != net)) 535 if (!net_eq(sock_net(osk), net))
536 continue; 536 continue;
537 if (nlk_sk(osk)->pid == pid) { 537 if (nlk_sk(osk)->pid == pid) {
538 /* Bind collision, search negative pid values. */ 538 /* Bind collision, search negative pid values. */
@@ -611,7 +611,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
611 int addr_len) 611 int addr_len)
612{ 612{
613 struct sock *sk = sock->sk; 613 struct sock *sk = sock->sk;
614 struct net *net = sk->sk_net; 614 struct net *net = sock_net(sk);
615 struct netlink_sock *nlk = nlk_sk(sk); 615 struct netlink_sock *nlk = nlk_sk(sk);
616 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; 616 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
617 int err; 617 int err;
@@ -720,7 +720,7 @@ static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
720 struct sock *sock; 720 struct sock *sock;
721 struct netlink_sock *nlk; 721 struct netlink_sock *nlk;
722 722
723 sock = netlink_lookup(ssk->sk_net, ssk->sk_protocol, pid); 723 sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, pid);
724 if (!sock) 724 if (!sock)
725 return ERR_PTR(-ECONNREFUSED); 725 return ERR_PTR(-ECONNREFUSED);
726 726
@@ -886,6 +886,13 @@ retry:
886 if (netlink_is_kernel(sk)) 886 if (netlink_is_kernel(sk))
887 return netlink_unicast_kernel(sk, skb); 887 return netlink_unicast_kernel(sk, skb);
888 888
889 if (sk_filter(sk, skb)) {
890 int err = skb->len;
891 kfree_skb(skb);
892 sock_put(sk);
893 return err;
894 }
895
889 err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk); 896 err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk);
890 if (err == 1) 897 if (err == 1)
891 goto retry; 898 goto retry;
@@ -955,7 +962,7 @@ static inline int do_one_broadcast(struct sock *sk,
955 !test_bit(p->group - 1, nlk->groups)) 962 !test_bit(p->group - 1, nlk->groups))
956 goto out; 963 goto out;
957 964
958 if ((sk->sk_net != p->net)) 965 if (!net_eq(sock_net(sk), p->net))
959 goto out; 966 goto out;
960 967
961 if (p->failure) { 968 if (p->failure) {
@@ -980,6 +987,9 @@ static inline int do_one_broadcast(struct sock *sk,
980 netlink_overrun(sk); 987 netlink_overrun(sk);
981 /* Clone failed. Notify ALL listeners. */ 988 /* Clone failed. Notify ALL listeners. */
982 p->failure = 1; 989 p->failure = 1;
990 } else if (sk_filter(sk, p->skb2)) {
991 kfree_skb(p->skb2);
992 p->skb2 = NULL;
983 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) { 993 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
984 netlink_overrun(sk); 994 netlink_overrun(sk);
985 } else { 995 } else {
@@ -996,7 +1006,7 @@ out:
996int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid, 1006int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
997 u32 group, gfp_t allocation) 1007 u32 group, gfp_t allocation)
998{ 1008{
999 struct net *net = ssk->sk_net; 1009 struct net *net = sock_net(ssk);
1000 struct netlink_broadcast_data info; 1010 struct netlink_broadcast_data info;
1001 struct hlist_node *node; 1011 struct hlist_node *node;
1002 struct sock *sk; 1012 struct sock *sk;
@@ -1054,7 +1064,7 @@ static inline int do_one_set_err(struct sock *sk,
1054 if (sk == p->exclude_sk) 1064 if (sk == p->exclude_sk)
1055 goto out; 1065 goto out;
1056 1066
1057 if (sk->sk_net != p->exclude_sk->sk_net) 1067 if (sock_net(sk) != sock_net(p->exclude_sk))
1058 goto out; 1068 goto out;
1059 1069
1060 if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups || 1070 if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
@@ -1344,22 +1354,6 @@ static void netlink_data_ready(struct sock *sk, int len)
1344 * queueing. 1354 * queueing.
1345 */ 1355 */
1346 1356
1347static void __netlink_release(struct sock *sk)
1348{
1349 /*
1350 * Last sock_put should drop referrence to sk->sk_net. It has already
1351 * been dropped in netlink_kernel_create. Taking referrence to stopping
1352 * namespace is not an option.
1353 * Take referrence to a socket to remove it from netlink lookup table
1354 * _alive_ and after that destroy it in the context of init_net.
1355 */
1356
1357 sock_hold(sk);
1358 sock_release(sk->sk_socket);
1359 sk->sk_net = get_net(&init_net);
1360 sock_put(sk);
1361}
1362
1363struct sock * 1357struct sock *
1364netlink_kernel_create(struct net *net, int unit, unsigned int groups, 1358netlink_kernel_create(struct net *net, int unit, unsigned int groups,
1365 void (*input)(struct sk_buff *skb), 1359 void (*input)(struct sk_buff *skb),
@@ -1388,8 +1382,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
1388 goto out_sock_release_nosk; 1382 goto out_sock_release_nosk;
1389 1383
1390 sk = sock->sk; 1384 sk = sock->sk;
1391 put_net(sk->sk_net); 1385 sk_change_net(sk, net);
1392 sk->sk_net = net;
1393 1386
1394 if (groups < 32) 1387 if (groups < 32)
1395 groups = 32; 1388 groups = 32;
@@ -1424,7 +1417,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
1424 1417
1425out_sock_release: 1418out_sock_release:
1426 kfree(listeners); 1419 kfree(listeners);
1427 __netlink_release(sk); 1420 netlink_kernel_release(sk);
1428 return NULL; 1421 return NULL;
1429 1422
1430out_sock_release_nosk: 1423out_sock_release_nosk:
@@ -1437,10 +1430,7 @@ EXPORT_SYMBOL(netlink_kernel_create);
1437void 1430void
1438netlink_kernel_release(struct sock *sk) 1431netlink_kernel_release(struct sock *sk)
1439{ 1432{
1440 if (sk == NULL || sk->sk_socket == NULL) 1433 sk_release_kernel(sk);
1441 return;
1442
1443 __netlink_release(sk);
1444} 1434}
1445EXPORT_SYMBOL(netlink_kernel_release); 1435EXPORT_SYMBOL(netlink_kernel_release);
1446 1436
@@ -1553,8 +1543,13 @@ static int netlink_dump(struct sock *sk)
1553 1543
1554 if (len > 0) { 1544 if (len > 0) {
1555 mutex_unlock(nlk->cb_mutex); 1545 mutex_unlock(nlk->cb_mutex);
1556 skb_queue_tail(&sk->sk_receive_queue, skb); 1546
1557 sk->sk_data_ready(sk, len); 1547 if (sk_filter(sk, skb))
1548 kfree_skb(skb);
1549 else {
1550 skb_queue_tail(&sk->sk_receive_queue, skb);
1551 sk->sk_data_ready(sk, skb->len);
1552 }
1558 return 0; 1553 return 0;
1559 } 1554 }
1560 1555
@@ -1564,8 +1559,12 @@ static int netlink_dump(struct sock *sk)
1564 1559
1565 memcpy(nlmsg_data(nlh), &len, sizeof(len)); 1560 memcpy(nlmsg_data(nlh), &len, sizeof(len));
1566 1561
1567 skb_queue_tail(&sk->sk_receive_queue, skb); 1562 if (sk_filter(sk, skb))
1568 sk->sk_data_ready(sk, skb->len); 1563 kfree_skb(skb);
1564 else {
1565 skb_queue_tail(&sk->sk_receive_queue, skb);
1566 sk->sk_data_ready(sk, skb->len);
1567 }
1569 1568
1570 if (cb->done) 1569 if (cb->done)
1571 cb->done(cb); 1570 cb->done(cb);
@@ -1602,7 +1601,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1602 atomic_inc(&skb->users); 1601 atomic_inc(&skb->users);
1603 cb->skb = skb; 1602 cb->skb = skb;
1604 1603
1605 sk = netlink_lookup(ssk->sk_net, ssk->sk_protocol, NETLINK_CB(skb).pid); 1604 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).pid);
1606 if (sk == NULL) { 1605 if (sk == NULL) {
1607 netlink_destroy_callback(cb); 1606 netlink_destroy_callback(cb);
1608 return -ECONNREFUSED; 1607 return -ECONNREFUSED;
@@ -1644,7 +1643,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
1644 if (!skb) { 1643 if (!skb) {
1645 struct sock *sk; 1644 struct sock *sk;
1646 1645
1647 sk = netlink_lookup(in_skb->sk->sk_net, 1646 sk = netlink_lookup(sock_net(in_skb->sk),
1648 in_skb->sk->sk_protocol, 1647 in_skb->sk->sk_protocol,
1649 NETLINK_CB(in_skb).pid); 1648 NETLINK_CB(in_skb).pid);
1650 if (sk) { 1649 if (sk) {
@@ -1759,7 +1758,7 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
1759 1758
1760 for (j = 0; j <= hash->mask; j++) { 1759 for (j = 0; j <= hash->mask; j++) {
1761 sk_for_each(s, node, &hash->table[j]) { 1760 sk_for_each(s, node, &hash->table[j]) {
1762 if (iter->p.net != s->sk_net) 1761 if (sock_net(s) != seq_file_net(seq))
1763 continue; 1762 continue;
1764 if (off == pos) { 1763 if (off == pos) {
1765 iter->link = i; 1764 iter->link = i;
@@ -1795,7 +1794,7 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1795 s = v; 1794 s = v;
1796 do { 1795 do {
1797 s = sk_next(s); 1796 s = sk_next(s);
1798 } while (s && (iter->p.net != s->sk_net)); 1797 } while (s && sock_net(s) != seq_file_net(seq));
1799 if (s) 1798 if (s)
1800 return s; 1799 return s;
1801 1800
@@ -1807,7 +1806,7 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1807 1806
1808 for (; j <= hash->mask; j++) { 1807 for (; j <= hash->mask; j++) {
1809 s = sk_head(&hash->table[j]); 1808 s = sk_head(&hash->table[j]);
1810 while (s && (iter->p.net != s->sk_net)) 1809 while (s && sock_net(s) != seq_file_net(seq))
1811 s = sk_next(s); 1810 s = sk_next(s);
1812 if (s) { 1811 if (s) {
1813 iter->link = i; 1812 iter->link = i;
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 972250c974f1..4bae8b998cab 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -106,7 +106,7 @@ static int nr_device_event(struct notifier_block *this, unsigned long event, voi
106{ 106{
107 struct net_device *dev = (struct net_device *)ptr; 107 struct net_device *dev = (struct net_device *)ptr;
108 108
109 if (dev->nd_net != &init_net) 109 if (dev_net(dev) != &init_net)
110 return NOTIFY_DONE; 110 return NOTIFY_DONE;
111 111
112 if (event != NETDEV_DOWN) 112 if (event != NETDEV_DOWN)
@@ -466,7 +466,7 @@ static struct sock *nr_make_new(struct sock *osk)
466 if (osk->sk_type != SOCK_SEQPACKET) 466 if (osk->sk_type != SOCK_SEQPACKET)
467 return NULL; 467 return NULL;
468 468
469 sk = sk_alloc(osk->sk_net, PF_NETROM, GFP_ATOMIC, osk->sk_prot); 469 sk = sk_alloc(sock_net(osk), PF_NETROM, GFP_ATOMIC, osk->sk_prot);
470 if (sk == NULL) 470 if (sk == NULL)
471 return NULL; 471 return NULL;
472 472
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index b8b827c7062d..25070240d4ae 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -263,7 +263,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct
263 if (skb->pkt_type == PACKET_LOOPBACK) 263 if (skb->pkt_type == PACKET_LOOPBACK)
264 goto out; 264 goto out;
265 265
266 if (dev->nd_net != sk->sk_net) 266 if (dev_net(dev) != sock_net(sk))
267 goto out; 267 goto out;
268 268
269 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) 269 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
@@ -337,7 +337,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
337 */ 337 */
338 338
339 saddr->spkt_device[13] = 0; 339 saddr->spkt_device[13] = 0;
340 dev = dev_get_by_name(sk->sk_net, saddr->spkt_device); 340 dev = dev_get_by_name(sock_net(sk), saddr->spkt_device);
341 err = -ENODEV; 341 err = -ENODEV;
342 if (dev == NULL) 342 if (dev == NULL)
343 goto out_unlock; 343 goto out_unlock;
@@ -451,7 +451,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
451 sk = pt->af_packet_priv; 451 sk = pt->af_packet_priv;
452 po = pkt_sk(sk); 452 po = pkt_sk(sk);
453 453
454 if (dev->nd_net != sk->sk_net) 454 if (dev_net(dev) != sock_net(sk))
455 goto drop; 455 goto drop;
456 456
457 skb->dev = dev; 457 skb->dev = dev;
@@ -568,7 +568,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
568 sk = pt->af_packet_priv; 568 sk = pt->af_packet_priv;
569 po = pkt_sk(sk); 569 po = pkt_sk(sk);
570 570
571 if (dev->nd_net != sk->sk_net) 571 if (dev_net(dev) != sock_net(sk))
572 goto drop; 572 goto drop;
573 573
574 if (dev->header_ops) { 574 if (dev->header_ops) {
@@ -728,7 +728,7 @@ static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
728 } 728 }
729 729
730 730
731 dev = dev_get_by_index(sk->sk_net, ifindex); 731 dev = dev_get_by_index(sock_net(sk), ifindex);
732 err = -ENXIO; 732 err = -ENXIO;
733 if (dev == NULL) 733 if (dev == NULL)
734 goto out_unlock; 734 goto out_unlock;
@@ -800,7 +800,7 @@ static int packet_release(struct socket *sock)
800 if (!sk) 800 if (!sk)
801 return 0; 801 return 0;
802 802
803 net = sk->sk_net; 803 net = sock_net(sk);
804 po = pkt_sk(sk); 804 po = pkt_sk(sk);
805 805
806 write_lock_bh(&net->packet.sklist_lock); 806 write_lock_bh(&net->packet.sklist_lock);
@@ -914,7 +914,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int add
914 return -EINVAL; 914 return -EINVAL;
915 strlcpy(name,uaddr->sa_data,sizeof(name)); 915 strlcpy(name,uaddr->sa_data,sizeof(name));
916 916
917 dev = dev_get_by_name(sk->sk_net, name); 917 dev = dev_get_by_name(sock_net(sk), name);
918 if (dev) { 918 if (dev) {
919 err = packet_do_bind(sk, dev, pkt_sk(sk)->num); 919 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
920 dev_put(dev); 920 dev_put(dev);
@@ -941,7 +941,7 @@ static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len
941 941
942 if (sll->sll_ifindex) { 942 if (sll->sll_ifindex) {
943 err = -ENODEV; 943 err = -ENODEV;
944 dev = dev_get_by_index(sk->sk_net, sll->sll_ifindex); 944 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
945 if (dev == NULL) 945 if (dev == NULL)
946 goto out; 946 goto out;
947 } 947 }
@@ -1135,7 +1135,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
1135 return -EOPNOTSUPP; 1135 return -EOPNOTSUPP;
1136 1136
1137 uaddr->sa_family = AF_PACKET; 1137 uaddr->sa_family = AF_PACKET;
1138 dev = dev_get_by_index(sk->sk_net, pkt_sk(sk)->ifindex); 1138 dev = dev_get_by_index(sock_net(sk), pkt_sk(sk)->ifindex);
1139 if (dev) { 1139 if (dev) {
1140 strlcpy(uaddr->sa_data, dev->name, 15); 1140 strlcpy(uaddr->sa_data, dev->name, 15);
1141 dev_put(dev); 1141 dev_put(dev);
@@ -1160,7 +1160,7 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
1160 sll->sll_family = AF_PACKET; 1160 sll->sll_family = AF_PACKET;
1161 sll->sll_ifindex = po->ifindex; 1161 sll->sll_ifindex = po->ifindex;
1162 sll->sll_protocol = po->num; 1162 sll->sll_protocol = po->num;
1163 dev = dev_get_by_index(sk->sk_net, po->ifindex); 1163 dev = dev_get_by_index(sock_net(sk), po->ifindex);
1164 if (dev) { 1164 if (dev) {
1165 sll->sll_hatype = dev->type; 1165 sll->sll_hatype = dev->type;
1166 sll->sll_halen = dev->addr_len; 1166 sll->sll_halen = dev->addr_len;
@@ -1212,7 +1212,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
1212 rtnl_lock(); 1212 rtnl_lock();
1213 1213
1214 err = -ENODEV; 1214 err = -ENODEV;
1215 dev = __dev_get_by_index(sk->sk_net, mreq->mr_ifindex); 1215 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
1216 if (!dev) 1216 if (!dev)
1217 goto done; 1217 goto done;
1218 1218
@@ -1266,7 +1266,7 @@ static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
1266 if (--ml->count == 0) { 1266 if (--ml->count == 0) {
1267 struct net_device *dev; 1267 struct net_device *dev;
1268 *mlp = ml->next; 1268 *mlp = ml->next;
1269 dev = dev_get_by_index(sk->sk_net, ml->ifindex); 1269 dev = dev_get_by_index(sock_net(sk), ml->ifindex);
1270 if (dev) { 1270 if (dev) {
1271 packet_dev_mc(dev, ml, -1); 1271 packet_dev_mc(dev, ml, -1);
1272 dev_put(dev); 1272 dev_put(dev);
@@ -1294,7 +1294,7 @@ static void packet_flush_mclist(struct sock *sk)
1294 struct net_device *dev; 1294 struct net_device *dev;
1295 1295
1296 po->mclist = ml->next; 1296 po->mclist = ml->next;
1297 if ((dev = dev_get_by_index(sk->sk_net, ml->ifindex)) != NULL) { 1297 if ((dev = dev_get_by_index(sock_net(sk), ml->ifindex)) != NULL) {
1298 packet_dev_mc(dev, ml, -1); 1298 packet_dev_mc(dev, ml, -1);
1299 dev_put(dev); 1299 dev_put(dev);
1300 } 1300 }
@@ -1450,7 +1450,7 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void
1450 struct sock *sk; 1450 struct sock *sk;
1451 struct hlist_node *node; 1451 struct hlist_node *node;
1452 struct net_device *dev = data; 1452 struct net_device *dev = data;
1453 struct net *net = dev->nd_net; 1453 struct net *net = dev_net(dev);
1454 1454
1455 read_lock(&net->packet.sklist_lock); 1455 read_lock(&net->packet.sklist_lock);
1456 sk_for_each(sk, node, &net->packet.sklist) { 1456 sk_for_each(sk, node, &net->packet.sklist) {
@@ -1540,7 +1540,7 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
1540 case SIOCGIFDSTADDR: 1540 case SIOCGIFDSTADDR:
1541 case SIOCSIFDSTADDR: 1541 case SIOCSIFDSTADDR:
1542 case SIOCSIFFLAGS: 1542 case SIOCSIFFLAGS:
1543 if (sk->sk_net != &init_net) 1543 if (sock_net(sk) != &init_net)
1544 return -ENOIOCTLCMD; 1544 return -ENOIOCTLCMD;
1545 return inet_dgram_ops.ioctl(sock, cmd, arg); 1545 return inet_dgram_ops.ioctl(sock, cmd, arg);
1546#endif 1546#endif
@@ -1658,7 +1658,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
1658 int err = 0; 1658 int err = 0;
1659 1659
1660 if (req->tp_block_nr) { 1660 if (req->tp_block_nr) {
1661 int i, l; 1661 int i;
1662 1662
1663 /* Sanity tests and some calculations */ 1663 /* Sanity tests and some calculations */
1664 1664
@@ -1687,7 +1687,6 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
1687 if (unlikely(!pg_vec)) 1687 if (unlikely(!pg_vec))
1688 goto out; 1688 goto out;
1689 1689
1690 l = 0;
1691 for (i = 0; i < req->tp_block_nr; i++) { 1690 for (i = 0; i < req->tp_block_nr; i++) {
1692 char *ptr = pg_vec[i]; 1691 char *ptr = pg_vec[i];
1693 struct tpacket_hdr *header; 1692 struct tpacket_hdr *header;
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 063cbc5c26b1..d1ff3f885c5d 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -197,7 +197,7 @@ static int rose_device_event(struct notifier_block *this, unsigned long event,
197{ 197{
198 struct net_device *dev = (struct net_device *)ptr; 198 struct net_device *dev = (struct net_device *)ptr;
199 199
200 if (dev->nd_net != &init_net) 200 if (dev_net(dev) != &init_net)
201 return NOTIFY_DONE; 201 return NOTIFY_DONE;
202 202
203 if (event != NETDEV_DOWN) 203 if (event != NETDEV_DOWN)
@@ -551,7 +551,7 @@ static struct sock *rose_make_new(struct sock *osk)
551 if (osk->sk_type != SOCK_SEQPACKET) 551 if (osk->sk_type != SOCK_SEQPACKET)
552 return NULL; 552 return NULL;
553 553
554 sk = sk_alloc(osk->sk_net, PF_ROSE, GFP_ATOMIC, &rose_proto); 554 sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto);
555 if (sk == NULL) 555 if (sk == NULL)
556 return NULL; 556 return NULL;
557 557
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 53fe94c9d36b..3e7318c1343c 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -619,8 +619,8 @@ void _dbprintk(const char *fmt, ...)
619{ 619{
620} 620}
621 621
622#define kenter(FMT,...) dbgprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__) 622#define kenter(FMT,...) dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
623#define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__) 623#define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
624#define kdebug(FMT,...) dbgprintk(" "FMT ,##__VA_ARGS__) 624#define kdebug(FMT,...) dbgprintk(" "FMT ,##__VA_ARGS__)
625#define kproto(FMT,...) dbgprintk("### "FMT ,##__VA_ARGS__) 625#define kproto(FMT,...) dbgprintk("### "FMT ,##__VA_ARGS__)
626#define knet(FMT,...) dbgprintk("@@@ "FMT ,##__VA_ARGS__) 626#define knet(FMT,...) dbgprintk("@@@ "FMT ,##__VA_ARGS__)
@@ -671,8 +671,8 @@ do { \
671} while (0) 671} while (0)
672 672
673#else 673#else
674#define _enter(FMT,...) _dbprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__) 674#define _enter(FMT,...) _dbprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
675#define _leave(FMT,...) _dbprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__) 675#define _leave(FMT,...) _dbprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
676#define _debug(FMT,...) _dbprintk(" "FMT ,##__VA_ARGS__) 676#define _debug(FMT,...) _dbprintk(" "FMT ,##__VA_ARGS__)
677#define _proto(FMT,...) _dbprintk("### "FMT ,##__VA_ARGS__) 677#define _proto(FMT,...) _dbprintk("### "FMT ,##__VA_ARGS__)
678#define _net(FMT,...) _dbprintk("@@@ "FMT ,##__VA_ARGS__) 678#define _net(FMT,...) _dbprintk("@@@ "FMT ,##__VA_ARGS__)
diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
index 83eda247fe48..017322e2786d 100644
--- a/net/rxrpc/ar-proc.c
+++ b/net/rxrpc/ar-proc.c
@@ -103,7 +103,7 @@ const struct file_operations rxrpc_call_seq_fops = {
103 .open = rxrpc_call_seq_open, 103 .open = rxrpc_call_seq_open,
104 .read = seq_read, 104 .read = seq_read,
105 .llseek = seq_lseek, 105 .llseek = seq_lseek,
106 .release = seq_release_private, 106 .release = seq_release,
107}; 107};
108 108
109/* 109/*
@@ -188,5 +188,5 @@ const struct file_operations rxrpc_connection_seq_fops = {
188 .open = rxrpc_connection_seq_open, 188 .open = rxrpc_connection_seq_open,
189 .read = seq_read, 189 .read = seq_read,
190 .llseek = seq_lseek, 190 .llseek = seq_lseek,
191 .release = seq_release_private, 191 .release = seq_release,
192}; 192};
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 0b8eb235bc13..74e662cbb2c5 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -951,7 +951,7 @@ done:
951 951
952static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) 952static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
953{ 953{
954 struct net *net = skb->sk->sk_net; 954 struct net *net = sock_net(skb->sk);
955 struct nlattr *tca[TCA_ACT_MAX + 1]; 955 struct nlattr *tca[TCA_ACT_MAX + 1];
956 u32 pid = skb ? NETLINK_CB(skb).pid : 0; 956 u32 pid = skb ? NETLINK_CB(skb).pid : 0;
957 int ret = 0, ovr = 0; 957 int ret = 0, ovr = 0;
@@ -1029,7 +1029,7 @@ find_dump_kind(struct nlmsghdr *n)
1029static int 1029static int
1030tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) 1030tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1031{ 1031{
1032 struct net *net = skb->sk->sk_net; 1032 struct net *net = sock_net(skb->sk);
1033 struct nlmsghdr *nlh; 1033 struct nlmsghdr *nlh;
1034 unsigned char *b = skb_tail_pointer(skb); 1034 unsigned char *b = skb_tail_pointer(skb);
1035 struct nlattr *nest; 1035 struct nlattr *nest;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index fbde461b716c..64b2d136c78e 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -115,7 +115,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
115 return -EINVAL; 115 return -EINVAL;
116 116
117 datalen = nla_len(tb[TCA_DEF_DATA]); 117 datalen = nla_len(tb[TCA_DEF_DATA]);
118 if (datalen <= 0) 118 if (datalen == 0)
119 return -EINVAL; 119 return -EINVAL;
120 120
121 pc = tcf_hash_check(parm->index, a, bind, &simp_hash_info); 121 pc = tcf_hash_check(parm->index, a, bind, &simp_hash_info);
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 0fbedcabf111..1086df7478bc 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -118,7 +118,7 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
118 118
119static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) 119static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
120{ 120{
121 struct net *net = skb->sk->sk_net; 121 struct net *net = sock_net(skb->sk);
122 struct nlattr *tca[TCA_MAX + 1]; 122 struct nlattr *tca[TCA_MAX + 1];
123 struct tcmsg *t; 123 struct tcmsg *t;
124 u32 protocol; 124 u32 protocol;
@@ -389,7 +389,7 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
389 389
390static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) 390static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
391{ 391{
392 struct net *net = skb->sk->sk_net; 392 struct net *net = sock_net(skb->sk);
393 int t; 393 int t;
394 int s_t; 394 int s_t;
395 struct net_device *dev; 395 struct net_device *dev;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 3da4129b89d1..72cf86e3c090 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -256,10 +256,10 @@ META_COLLECTOR(int_rtclassid)
256 256
257META_COLLECTOR(int_rtiif) 257META_COLLECTOR(int_rtiif)
258{ 258{
259 if (unlikely(skb->dst == NULL)) 259 if (unlikely(skb->rtable == NULL))
260 *err = -1; 260 *err = -1;
261 else 261 else
262 dst->value = ((struct rtable*) skb->dst)->fl.iif; 262 dst->value = skb->rtable->fl.iif;
263} 263}
264 264
265/************************************************************************** 265/**************************************************************************
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index fc8708a0a25e..c40773cdbe45 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -608,7 +608,7 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
608 608
609static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) 609static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
610{ 610{
611 struct net *net = skb->sk->sk_net; 611 struct net *net = sock_net(skb->sk);
612 struct tcmsg *tcm = NLMSG_DATA(n); 612 struct tcmsg *tcm = NLMSG_DATA(n);
613 struct nlattr *tca[TCA_MAX + 1]; 613 struct nlattr *tca[TCA_MAX + 1];
614 struct net_device *dev; 614 struct net_device *dev;
@@ -677,7 +677,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
677 677
678static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) 678static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
679{ 679{
680 struct net *net = skb->sk->sk_net; 680 struct net *net = sock_net(skb->sk);
681 struct tcmsg *tcm; 681 struct tcmsg *tcm;
682 struct nlattr *tca[TCA_MAX + 1]; 682 struct nlattr *tca[TCA_MAX + 1];
683 struct net_device *dev; 683 struct net_device *dev;
@@ -896,7 +896,7 @@ err_out:
896 896
897static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) 897static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
898{ 898{
899 struct net *net = skb->sk->sk_net; 899 struct net *net = sock_net(skb->sk);
900 int idx, q_idx; 900 int idx, q_idx;
901 int s_idx, s_q_idx; 901 int s_idx, s_q_idx;
902 struct net_device *dev; 902 struct net_device *dev;
@@ -948,7 +948,7 @@ done:
948 948
949static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) 949static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
950{ 950{
951 struct net *net = skb->sk->sk_net; 951 struct net *net = sock_net(skb->sk);
952 struct tcmsg *tcm = NLMSG_DATA(n); 952 struct tcmsg *tcm = NLMSG_DATA(n);
953 struct nlattr *tca[TCA_MAX + 1]; 953 struct nlattr *tca[TCA_MAX + 1];
954 struct net_device *dev; 954 struct net_device *dev;
@@ -1142,7 +1142,7 @@ static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walk
1142 1142
1143static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) 1143static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1144{ 1144{
1145 struct net *net = skb->sk->sk_net; 1145 struct net *net = sock_net(skb->sk);
1146 int t; 1146 int t;
1147 int s_t; 1147 int s_t;
1148 struct net_device *dev; 1148 struct net_device *dev;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index d29f792e0529..b4cd2b71953f 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -718,12 +718,11 @@ struct sctp_transport *sctp_assoc_lookup_paddr(
718 const union sctp_addr *address) 718 const union sctp_addr *address)
719{ 719{
720 struct sctp_transport *t; 720 struct sctp_transport *t;
721 struct list_head *pos;
722 721
723 /* Cycle through all transports searching for a peer address. */ 722 /* Cycle through all transports searching for a peer address. */
724 723
725 list_for_each(pos, &asoc->peer.transport_addr_list) { 724 list_for_each_entry(t, &asoc->peer.transport_addr_list,
726 t = list_entry(pos, struct sctp_transport, transports); 725 transports) {
727 if (sctp_cmp_addr_exact(address, &t->ipaddr)) 726 if (sctp_cmp_addr_exact(address, &t->ipaddr))
728 return t; 727 return t;
729 } 728 }
@@ -762,7 +761,6 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
762 struct sctp_transport *second; 761 struct sctp_transport *second;
763 struct sctp_ulpevent *event; 762 struct sctp_ulpevent *event;
764 struct sockaddr_storage addr; 763 struct sockaddr_storage addr;
765 struct list_head *pos;
766 int spc_state = 0; 764 int spc_state = 0;
767 765
768 /* Record the transition on the transport. */ 766 /* Record the transition on the transport. */
@@ -814,8 +812,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
814 */ 812 */
815 first = NULL; second = NULL; 813 first = NULL; second = NULL;
816 814
817 list_for_each(pos, &asoc->peer.transport_addr_list) { 815 list_for_each_entry(t, &asoc->peer.transport_addr_list,
818 t = list_entry(pos, struct sctp_transport, transports); 816 transports) {
819 817
820 if ((t->state == SCTP_INACTIVE) || 818 if ((t->state == SCTP_INACTIVE) ||
821 (t->state == SCTP_UNCONFIRMED)) 819 (t->state == SCTP_UNCONFIRMED))
@@ -932,7 +930,6 @@ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
932{ 930{
933 struct sctp_transport *active; 931 struct sctp_transport *active;
934 struct sctp_transport *match; 932 struct sctp_transport *match;
935 struct list_head *entry, *pos;
936 struct sctp_transport *transport; 933 struct sctp_transport *transport;
937 struct sctp_chunk *chunk; 934 struct sctp_chunk *chunk;
938 __be32 key = htonl(tsn); 935 __be32 key = htonl(tsn);
@@ -956,8 +953,8 @@ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
956 953
957 active = asoc->peer.active_path; 954 active = asoc->peer.active_path;
958 955
959 list_for_each(entry, &active->transmitted) { 956 list_for_each_entry(chunk, &active->transmitted,
960 chunk = list_entry(entry, struct sctp_chunk, transmitted_list); 957 transmitted_list) {
961 958
962 if (key == chunk->subh.data_hdr->tsn) { 959 if (key == chunk->subh.data_hdr->tsn) {
963 match = active; 960 match = active;
@@ -966,14 +963,13 @@ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
966 } 963 }
967 964
968 /* If not found, go search all the other transports. */ 965 /* If not found, go search all the other transports. */
969 list_for_each(pos, &asoc->peer.transport_addr_list) { 966 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
970 transport = list_entry(pos, struct sctp_transport, transports); 967 transports) {
971 968
972 if (transport == active) 969 if (transport == active)
973 break; 970 break;
974 list_for_each(entry, &transport->transmitted) { 971 list_for_each_entry(chunk, &transport->transmitted,
975 chunk = list_entry(entry, struct sctp_chunk, 972 transmitted_list) {
976 transmitted_list);
977 if (key == chunk->subh.data_hdr->tsn) { 973 if (key == chunk->subh.data_hdr->tsn) {
978 match = transport; 974 match = transport;
979 goto out; 975 goto out;
@@ -1154,9 +1150,8 @@ void sctp_assoc_update(struct sctp_association *asoc,
1154 1150
1155 } else { 1151 } else {
1156 /* Add any peer addresses from the new association. */ 1152 /* Add any peer addresses from the new association. */
1157 list_for_each(pos, &new->peer.transport_addr_list) { 1153 list_for_each_entry(trans, &new->peer.transport_addr_list,
1158 trans = list_entry(pos, struct sctp_transport, 1154 transports) {
1159 transports);
1160 if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr)) 1155 if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
1161 sctp_assoc_add_peer(asoc, &trans->ipaddr, 1156 sctp_assoc_add_peer(asoc, &trans->ipaddr,
1162 GFP_ATOMIC, trans->state); 1157 GFP_ATOMIC, trans->state);
@@ -1306,15 +1301,14 @@ struct sctp_transport *sctp_assoc_choose_shutdown_transport(
1306void sctp_assoc_sync_pmtu(struct sctp_association *asoc) 1301void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
1307{ 1302{
1308 struct sctp_transport *t; 1303 struct sctp_transport *t;
1309 struct list_head *pos;
1310 __u32 pmtu = 0; 1304 __u32 pmtu = 0;
1311 1305
1312 if (!asoc) 1306 if (!asoc)
1313 return; 1307 return;
1314 1308
1315 /* Get the lowest pmtu of all the transports. */ 1309 /* Get the lowest pmtu of all the transports. */
1316 list_for_each(pos, &asoc->peer.transport_addr_list) { 1310 list_for_each_entry(t, &asoc->peer.transport_addr_list,
1317 t = list_entry(pos, struct sctp_transport, transports); 1311 transports) {
1318 if (t->pmtu_pending && t->dst) { 1312 if (t->pmtu_pending && t->dst) {
1319 sctp_transport_update_pmtu(t, dst_mtu(t->dst)); 1313 sctp_transport_update_pmtu(t, dst_mtu(t->dst));
1320 t->pmtu_pending = 0; 1314 t->pmtu_pending = 0;
@@ -1330,7 +1324,7 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
1330 } 1324 }
1331 1325
1332 SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n", 1326 SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n",
1333 __FUNCTION__, asoc, asoc->pathmtu, asoc->frag_point); 1327 __func__, asoc, asoc->pathmtu, asoc->frag_point);
1334} 1328}
1335 1329
1336/* Should we send a SACK to update our peer? */ 1330/* Should we send a SACK to update our peer? */
@@ -1370,7 +1364,7 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len)
1370 } 1364 }
1371 1365
1372 SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) " 1366 SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) "
1373 "- %u\n", __FUNCTION__, asoc, len, asoc->rwnd, 1367 "- %u\n", __func__, asoc, len, asoc->rwnd,
1374 asoc->rwnd_over, asoc->a_rwnd); 1368 asoc->rwnd_over, asoc->a_rwnd);
1375 1369
1376 /* Send a window update SACK if the rwnd has increased by at least the 1370 /* Send a window update SACK if the rwnd has increased by at least the
@@ -1381,7 +1375,7 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len)
1381 if (sctp_peer_needs_update(asoc)) { 1375 if (sctp_peer_needs_update(asoc)) {
1382 asoc->a_rwnd = asoc->rwnd; 1376 asoc->a_rwnd = asoc->rwnd;
1383 SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p " 1377 SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p "
1384 "rwnd: %u a_rwnd: %u\n", __FUNCTION__, 1378 "rwnd: %u a_rwnd: %u\n", __func__,
1385 asoc, asoc->rwnd, asoc->a_rwnd); 1379 asoc, asoc->rwnd, asoc->a_rwnd);
1386 sack = sctp_make_sack(asoc); 1380 sack = sctp_make_sack(asoc);
1387 if (!sack) 1381 if (!sack)
@@ -1410,7 +1404,7 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len)
1410 asoc->rwnd = 0; 1404 asoc->rwnd = 0;
1411 } 1405 }
1412 SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u)\n", 1406 SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u)\n",
1413 __FUNCTION__, asoc, len, asoc->rwnd, 1407 __func__, asoc, len, asoc->rwnd,
1414 asoc->rwnd_over); 1408 asoc->rwnd_over);
1415} 1409}
1416 1410
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index ceefda025e2d..80e6df06967a 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -67,15 +67,13 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
67 int flags) 67 int flags)
68{ 68{
69 struct sctp_sockaddr_entry *addr; 69 struct sctp_sockaddr_entry *addr;
70 struct list_head *pos;
71 int error = 0; 70 int error = 0;
72 71
73 /* All addresses share the same port. */ 72 /* All addresses share the same port. */
74 dest->port = src->port; 73 dest->port = src->port;
75 74
76 /* Extract the addresses which are relevant for this scope. */ 75 /* Extract the addresses which are relevant for this scope. */
77 list_for_each(pos, &src->address_list) { 76 list_for_each_entry(addr, &src->address_list, list) {
78 addr = list_entry(pos, struct sctp_sockaddr_entry, list);
79 error = sctp_copy_one_addr(dest, &addr->a, scope, 77 error = sctp_copy_one_addr(dest, &addr->a, scope,
80 gfp, flags); 78 gfp, flags);
81 if (error < 0) 79 if (error < 0)
@@ -87,9 +85,7 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
87 * the assumption that we must be sitting behind a NAT. 85 * the assumption that we must be sitting behind a NAT.
88 */ 86 */
89 if (list_empty(&dest->address_list) && (SCTP_SCOPE_GLOBAL == scope)) { 87 if (list_empty(&dest->address_list) && (SCTP_SCOPE_GLOBAL == scope)) {
90 list_for_each(pos, &src->address_list) { 88 list_for_each_entry(addr, &src->address_list, list) {
91 addr = list_entry(pos, struct sctp_sockaddr_entry,
92 list);
93 error = sctp_copy_one_addr(dest, &addr->a, 89 error = sctp_copy_one_addr(dest, &addr->a,
94 SCTP_SCOPE_LINK, gfp, 90 SCTP_SCOPE_LINK, gfp,
95 flags); 91 flags);
@@ -115,14 +111,12 @@ int sctp_bind_addr_dup(struct sctp_bind_addr *dest,
115 gfp_t gfp) 111 gfp_t gfp)
116{ 112{
117 struct sctp_sockaddr_entry *addr; 113 struct sctp_sockaddr_entry *addr;
118 struct list_head *pos;
119 int error = 0; 114 int error = 0;
120 115
121 /* All addresses share the same port. */ 116 /* All addresses share the same port. */
122 dest->port = src->port; 117 dest->port = src->port;
123 118
124 list_for_each(pos, &src->address_list) { 119 list_for_each_entry(addr, &src->address_list, list) {
125 addr = list_entry(pos, struct sctp_sockaddr_entry, list);
126 error = sctp_add_bind_addr(dest, &addr->a, 1, gfp); 120 error = sctp_add_bind_addr(dest, &addr->a, 1, gfp);
127 if (error < 0) 121 if (error < 0)
128 break; 122 break;
@@ -273,8 +267,7 @@ union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp,
273 267
274 addrparms = retval; 268 addrparms = retval;
275 269
276 list_for_each(pos, &bp->address_list) { 270 list_for_each_entry(addr, &bp->address_list, list) {
277 addr = list_entry(pos, struct sctp_sockaddr_entry, list);
278 af = sctp_get_af_specific(addr->a.v4.sin_family); 271 af = sctp_get_af_specific(addr->a.v4.sin_family);
279 len = af->to_addr_param(&addr->a, &rawaddr); 272 len = af->to_addr_param(&addr->a, &rawaddr);
280 memcpy(addrparms.v, &rawaddr, len); 273 memcpy(addrparms.v, &rawaddr, len);
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 4d3128f5ccc3..1748ef90950c 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -66,9 +66,10 @@ SCTP_STATIC struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp)
66{ 66{
67 struct sctp_datamsg *msg; 67 struct sctp_datamsg *msg;
68 msg = kmalloc(sizeof(struct sctp_datamsg), gfp); 68 msg = kmalloc(sizeof(struct sctp_datamsg), gfp);
69 if (msg) 69 if (msg) {
70 sctp_datamsg_init(msg); 70 sctp_datamsg_init(msg);
71 SCTP_DBG_OBJCNT_INC(datamsg); 71 SCTP_DBG_OBJCNT_INC(datamsg);
72 }
72 return msg; 73 return msg;
73} 74}
74 75
@@ -136,20 +137,6 @@ void sctp_datamsg_put(struct sctp_datamsg *msg)
136 sctp_datamsg_destroy(msg); 137 sctp_datamsg_destroy(msg);
137} 138}
138 139
139/* Free a message. Really just give up a reference, the
140 * really free happens in sctp_datamsg_destroy().
141 */
142void sctp_datamsg_free(struct sctp_datamsg *msg)
143{
144 sctp_datamsg_put(msg);
145}
146
147/* Hold on to all the fragments until all chunks have been sent. */
148void sctp_datamsg_track(struct sctp_chunk *chunk)
149{
150 sctp_chunk_hold(chunk);
151}
152
153/* Assign a chunk to this datamsg. */ 140/* Assign a chunk to this datamsg. */
154static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chunk) 141static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chunk)
155{ 142{
@@ -189,7 +176,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
189 msecs_to_jiffies(sinfo->sinfo_timetolive); 176 msecs_to_jiffies(sinfo->sinfo_timetolive);
190 msg->can_abandon = 1; 177 msg->can_abandon = 1;
191 SCTP_DEBUG_PRINTK("%s: msg:%p expires_at: %ld jiffies:%ld\n", 178 SCTP_DEBUG_PRINTK("%s: msg:%p expires_at: %ld jiffies:%ld\n",
192 __FUNCTION__, msg, msg->expires_at, jiffies); 179 __func__, msg, msg->expires_at, jiffies);
193 } 180 }
194 181
195 max = asoc->frag_point; 182 max = asoc->frag_point;
@@ -295,7 +282,7 @@ errout:
295 chunk = list_entry(pos, struct sctp_chunk, frag_list); 282 chunk = list_entry(pos, struct sctp_chunk, frag_list);
296 sctp_chunk_free(chunk); 283 sctp_chunk_free(chunk);
297 } 284 }
298 sctp_datamsg_free(msg); 285 sctp_datamsg_put(msg);
299 return NULL; 286 return NULL;
300} 287}
301 288
diff --git a/net/sctp/command.c b/net/sctp/command.c
index bb977330002a..c0044019db9e 100644
--- a/net/sctp/command.c
+++ b/net/sctp/command.c
@@ -52,18 +52,12 @@ int sctp_init_cmd_seq(sctp_cmd_seq_t *seq)
52/* Add a command to a sctp_cmd_seq_t. 52/* Add a command to a sctp_cmd_seq_t.
53 * Return 0 if the command sequence is full. 53 * Return 0 if the command sequence is full.
54 */ 54 */
55int sctp_add_cmd(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj) 55void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj)
56{ 56{
57 if (seq->next_free_slot >= SCTP_MAX_NUM_COMMANDS) 57 BUG_ON(seq->next_free_slot >= SCTP_MAX_NUM_COMMANDS);
58 goto fail;
59 58
60 seq->cmds[seq->next_free_slot].verb = verb; 59 seq->cmds[seq->next_free_slot].verb = verb;
61 seq->cmds[seq->next_free_slot++].obj = obj; 60 seq->cmds[seq->next_free_slot++].obj = obj;
62
63 return 1;
64
65fail:
66 return 0;
67} 61}
68 62
69/* Return the next command structure in a sctp_cmd_seq. 63/* Return the next command structure in a sctp_cmd_seq.
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 812ff1756c3e..ca6b022b1df2 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -409,7 +409,7 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
409 struct sctp_association *asoc, 409 struct sctp_association *asoc,
410 struct sctp_transport *t) 410 struct sctp_transport *t)
411{ 411{
412 SCTP_DEBUG_PRINTK("%s\n", __FUNCTION__); 412 SCTP_DEBUG_PRINTK("%s\n", __func__);
413 413
414 sctp_do_sm(SCTP_EVENT_T_OTHER, 414 sctp_do_sm(SCTP_EVENT_T_OTHER,
415 SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), 415 SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
@@ -725,7 +725,6 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l
725 } 725 }
726 726
727 ep = sctp_sk((sctp_get_ctl_sock()))->ep; 727 ep = sctp_sk((sctp_get_ctl_sock()))->ep;
728 epb = &ep->base;
729 728
730hit: 729hit:
731 sctp_endpoint_hold(ep); 730 sctp_endpoint_hold(ep);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 85f1495e0edc..e45e44c60635 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -226,7 +226,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport,
226 226
227 SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, " 227 SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, "
228 "src:" NIP6_FMT " dst:" NIP6_FMT "\n", 228 "src:" NIP6_FMT " dst:" NIP6_FMT "\n",
229 __FUNCTION__, skb, skb->len, 229 __func__, skb, skb->len,
230 NIP6(fl.fl6_src), NIP6(fl.fl6_dst)); 230 NIP6(fl.fl6_src), NIP6(fl.fl6_dst));
231 231
232 SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); 232 SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
@@ -251,7 +251,7 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc,
251 251
252 252
253 SCTP_DEBUG_PRINTK("%s: DST=" NIP6_FMT " ", 253 SCTP_DEBUG_PRINTK("%s: DST=" NIP6_FMT " ",
254 __FUNCTION__, NIP6(fl.fl6_dst)); 254 __func__, NIP6(fl.fl6_dst));
255 255
256 if (saddr) { 256 if (saddr) {
257 ipv6_addr_copy(&fl.fl6_src, &saddr->v6.sin6_addr); 257 ipv6_addr_copy(&fl.fl6_src, &saddr->v6.sin6_addr);
@@ -260,7 +260,7 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc,
260 NIP6(fl.fl6_src)); 260 NIP6(fl.fl6_src));
261 } 261 }
262 262
263 dst = ip6_route_output(NULL, &fl); 263 dst = ip6_route_output(&init_net, NULL, &fl);
264 if (!dst->error) { 264 if (!dst->error) {
265 struct rt6_info *rt; 265 struct rt6_info *rt;
266 rt = (struct rt6_info *)dst; 266 rt = (struct rt6_info *)dst;
@@ -313,10 +313,13 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
313 313
314 SCTP_DEBUG_PRINTK("%s: asoc:%p dst:%p " 314 SCTP_DEBUG_PRINTK("%s: asoc:%p dst:%p "
315 "daddr:" NIP6_FMT " ", 315 "daddr:" NIP6_FMT " ",
316 __FUNCTION__, asoc, dst, NIP6(daddr->v6.sin6_addr)); 316 __func__, asoc, dst, NIP6(daddr->v6.sin6_addr));
317 317
318 if (!asoc) { 318 if (!asoc) {
319 ipv6_get_saddr(dst, &daddr->v6.sin6_addr,&saddr->v6.sin6_addr); 319 ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL,
320 &daddr->v6.sin6_addr,
321 inet6_sk(asoc->base.sk)->srcprefs,
322 &saddr->v6.sin6_addr);
320 SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: " NIP6_FMT "\n", 323 SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: " NIP6_FMT "\n",
321 NIP6(saddr->v6.sin6_addr)); 324 NIP6(saddr->v6.sin6_addr));
322 return; 325 return;
@@ -351,7 +354,7 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
351 } else { 354 } else {
352 printk(KERN_ERR "%s: asoc:%p Could not find a valid source " 355 printk(KERN_ERR "%s: asoc:%p Could not find a valid source "
353 "address for the dest:" NIP6_FMT "\n", 356 "address for the dest:" NIP6_FMT "\n",
354 __FUNCTION__, asoc, NIP6(daddr->v6.sin6_addr)); 357 __func__, asoc, NIP6(daddr->v6.sin6_addr));
355 } 358 }
356 359
357 rcu_read_unlock(); 360 rcu_read_unlock();
@@ -634,7 +637,7 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
634 struct ipv6_pinfo *newnp, *np = inet6_sk(sk); 637 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
635 struct sctp6_sock *newsctp6sk; 638 struct sctp6_sock *newsctp6sk;
636 639
637 newsk = sk_alloc(sk->sk_net, PF_INET6, GFP_KERNEL, sk->sk_prot); 640 newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot);
638 if (!newsk) 641 if (!newsk)
639 goto out; 642 goto out;
640 643
diff --git a/net/sctp/output.c b/net/sctp/output.c
index aa700feea76c..cf4f9fb6819d 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -74,7 +74,7 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
74{ 74{
75 struct sctp_chunk *chunk = NULL; 75 struct sctp_chunk *chunk = NULL;
76 76
77 SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __FUNCTION__, 77 SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
78 packet, vtag); 78 packet, vtag);
79 79
80 packet->vtag = vtag; 80 packet->vtag = vtag;
@@ -106,7 +106,7 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
106 struct sctp_association *asoc = transport->asoc; 106 struct sctp_association *asoc = transport->asoc;
107 size_t overhead; 107 size_t overhead;
108 108
109 SCTP_DEBUG_PRINTK("%s: packet:%p transport:%p\n", __FUNCTION__, 109 SCTP_DEBUG_PRINTK("%s: packet:%p transport:%p\n", __func__,
110 packet, transport); 110 packet, transport);
111 111
112 packet->transport = transport; 112 packet->transport = transport;
@@ -138,7 +138,7 @@ void sctp_packet_free(struct sctp_packet *packet)
138{ 138{
139 struct sctp_chunk *chunk, *tmp; 139 struct sctp_chunk *chunk, *tmp;
140 140
141 SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet); 141 SCTP_DEBUG_PRINTK("%s: packet:%p\n", __func__, packet);
142 142
143 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { 143 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
144 list_del_init(&chunk->list); 144 list_del_init(&chunk->list);
@@ -162,7 +162,7 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
162 sctp_xmit_t retval; 162 sctp_xmit_t retval;
163 int error = 0; 163 int error = 0;
164 164
165 SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __FUNCTION__, 165 SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__,
166 packet, chunk); 166 packet, chunk);
167 167
168 switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) { 168 switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
@@ -264,7 +264,7 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
264 size_t pmtu; 264 size_t pmtu;
265 int too_big; 265 int too_big;
266 266
267 SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __FUNCTION__, packet, 267 SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet,
268 chunk); 268 chunk);
269 269
270 /* Try to bundle AUTH chunk */ 270 /* Try to bundle AUTH chunk */
@@ -372,7 +372,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
372 unsigned char *auth = NULL; /* pointer to auth in skb data */ 372 unsigned char *auth = NULL; /* pointer to auth in skb data */
373 __u32 cksum_buf_len = sizeof(struct sctphdr); 373 __u32 cksum_buf_len = sizeof(struct sctphdr);
374 374
375 SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet); 375 SCTP_DEBUG_PRINTK("%s: packet:%p\n", __func__, packet);
376 376
377 /* Do NOT generate a chunkless packet. */ 377 /* Do NOT generate a chunkless packet. */
378 if (list_empty(&packet->chunk_list)) 378 if (list_empty(&packet->chunk_list))
@@ -677,7 +677,7 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
677 "transport: %p, cwnd: %d, " 677 "transport: %p, cwnd: %d, "
678 "ssthresh: %d, flight_size: %d, " 678 "ssthresh: %d, flight_size: %d, "
679 "pba: %d\n", 679 "pba: %d\n",
680 __FUNCTION__, transport, 680 __func__, transport,
681 transport->cwnd, 681 transport->cwnd,
682 transport->ssthresh, 682 transport->ssthresh,
683 transport->flight_size, 683 transport->flight_size,
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index c0714469233c..59edfd25a19c 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -221,12 +221,12 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
221void sctp_outq_teardown(struct sctp_outq *q) 221void sctp_outq_teardown(struct sctp_outq *q)
222{ 222{
223 struct sctp_transport *transport; 223 struct sctp_transport *transport;
224 struct list_head *lchunk, *pos, *temp; 224 struct list_head *lchunk, *temp;
225 struct sctp_chunk *chunk, *tmp; 225 struct sctp_chunk *chunk, *tmp;
226 226
227 /* Throw away unacknowledged chunks. */ 227 /* Throw away unacknowledged chunks. */
228 list_for_each(pos, &q->asoc->peer.transport_addr_list) { 228 list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
229 transport = list_entry(pos, struct sctp_transport, transports); 229 transports) {
230 while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) { 230 while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
231 chunk = list_entry(lchunk, struct sctp_chunk, 231 chunk = list_entry(lchunk, struct sctp_chunk,
232 transmitted_list); 232 transmitted_list);
@@ -469,7 +469,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
469 469
470 SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, " 470 SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, "
471 "cwnd: %d, ssthresh: %d, flight_size: %d, " 471 "cwnd: %d, ssthresh: %d, flight_size: %d, "
472 "pba: %d\n", __FUNCTION__, 472 "pba: %d\n", __func__,
473 transport, reason, 473 transport, reason,
474 transport->cwnd, transport->ssthresh, 474 transport->cwnd, transport->ssthresh,
475 transport->flight_size, 475 transport->flight_size,
@@ -494,6 +494,8 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
494 */ 494 */
495 if (transport == transport->asoc->peer.retran_path) 495 if (transport == transport->asoc->peer.retran_path)
496 sctp_assoc_update_retran_path(transport->asoc); 496 sctp_assoc_update_retran_path(transport->asoc);
497 transport->asoc->rtx_data_chunks +=
498 transport->asoc->unack_data;
497 break; 499 break;
498 case SCTP_RTXR_FAST_RTX: 500 case SCTP_RTXR_FAST_RTX:
499 SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS); 501 SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
@@ -504,6 +506,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
504 break; 506 break;
505 case SCTP_RTXR_T1_RTX: 507 case SCTP_RTXR_T1_RTX:
506 SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS); 508 SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS);
509 transport->asoc->init_retries++;
507 break; 510 break;
508 default: 511 default:
509 BUG(); 512 BUG();
@@ -535,7 +538,7 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
535 int rtx_timeout, int *start_timer) 538 int rtx_timeout, int *start_timer)
536{ 539{
537 struct list_head *lqueue; 540 struct list_head *lqueue;
538 struct list_head *lchunk, *lchunk1; 541 struct list_head *lchunk;
539 struct sctp_transport *transport = pkt->transport; 542 struct sctp_transport *transport = pkt->transport;
540 sctp_xmit_t status; 543 sctp_xmit_t status;
541 struct sctp_chunk *chunk, *chunk1; 544 struct sctp_chunk *chunk, *chunk1;
@@ -646,9 +649,7 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
646 * to be marked as ineligible for a subsequent fast retransmit. 649 * to be marked as ineligible for a subsequent fast retransmit.
647 */ 650 */
648 if (rtx_timeout && !lchunk) { 651 if (rtx_timeout && !lchunk) {
649 list_for_each(lchunk1, lqueue) { 652 list_for_each_entry(chunk1, lqueue, transmitted_list) {
650 chunk1 = list_entry(lchunk1, struct sctp_chunk,
651 transmitted_list);
652 if (chunk1->fast_retransmit > 0) 653 if (chunk1->fast_retransmit > 0)
653 chunk1->fast_retransmit = -1; 654 chunk1->fast_retransmit = -1;
654 } 655 }
@@ -1037,7 +1038,6 @@ static void sctp_sack_update_unack_data(struct sctp_association *assoc,
1037static __u32 sctp_highest_new_tsn(struct sctp_sackhdr *sack, 1038static __u32 sctp_highest_new_tsn(struct sctp_sackhdr *sack,
1038 struct sctp_association *asoc) 1039 struct sctp_association *asoc)
1039{ 1040{
1040 struct list_head *ltransport, *lchunk;
1041 struct sctp_transport *transport; 1041 struct sctp_transport *transport;
1042 struct sctp_chunk *chunk; 1042 struct sctp_chunk *chunk;
1043 __u32 highest_new_tsn, tsn; 1043 __u32 highest_new_tsn, tsn;
@@ -1045,12 +1045,9 @@ static __u32 sctp_highest_new_tsn(struct sctp_sackhdr *sack,
1045 1045
1046 highest_new_tsn = ntohl(sack->cum_tsn_ack); 1046 highest_new_tsn = ntohl(sack->cum_tsn_ack);
1047 1047
1048 list_for_each(ltransport, transport_list) { 1048 list_for_each_entry(transport, transport_list, transports) {
1049 transport = list_entry(ltransport, struct sctp_transport, 1049 list_for_each_entry(chunk, &transport->transmitted,
1050 transports); 1050 transmitted_list) {
1051 list_for_each(lchunk, &transport->transmitted) {
1052 chunk = list_entry(lchunk, struct sctp_chunk,
1053 transmitted_list);
1054 tsn = ntohl(chunk->subh.data_hdr->tsn); 1051 tsn = ntohl(chunk->subh.data_hdr->tsn);
1055 1052
1056 if (!chunk->tsn_gap_acked && 1053 if (!chunk->tsn_gap_acked &&
@@ -1073,7 +1070,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
1073 struct sctp_association *asoc = q->asoc; 1070 struct sctp_association *asoc = q->asoc;
1074 struct sctp_transport *transport; 1071 struct sctp_transport *transport;
1075 struct sctp_chunk *tchunk = NULL; 1072 struct sctp_chunk *tchunk = NULL;
1076 struct list_head *lchunk, *transport_list, *pos, *temp; 1073 struct list_head *lchunk, *transport_list, *temp;
1077 sctp_sack_variable_t *frags = sack->variable; 1074 sctp_sack_variable_t *frags = sack->variable;
1078 __u32 sack_ctsn, ctsn, tsn; 1075 __u32 sack_ctsn, ctsn, tsn;
1079 __u32 highest_tsn, highest_new_tsn; 1076 __u32 highest_tsn, highest_new_tsn;
@@ -1099,9 +1096,8 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
1099 */ 1096 */
1100 if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) { 1097 if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
1101 primary->cacc.changeover_active = 0; 1098 primary->cacc.changeover_active = 0;
1102 list_for_each(pos, transport_list) { 1099 list_for_each_entry(transport, transport_list,
1103 transport = list_entry(pos, struct sctp_transport, 1100 transports) {
1104 transports);
1105 transport->cacc.cycling_changeover = 0; 1101 transport->cacc.cycling_changeover = 0;
1106 } 1102 }
1107 } 1103 }
@@ -1116,9 +1112,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
1116 */ 1112 */
1117 if (sack->num_gap_ack_blocks && 1113 if (sack->num_gap_ack_blocks &&
1118 primary->cacc.changeover_active) { 1114 primary->cacc.changeover_active) {
1119 list_for_each(pos, transport_list) { 1115 list_for_each_entry(transport, transport_list, transports) {
1120 transport = list_entry(pos, struct sctp_transport,
1121 transports);
1122 transport->cacc.cacc_saw_newack = 0; 1116 transport->cacc.cacc_saw_newack = 0;
1123 } 1117 }
1124 } 1118 }
@@ -1147,9 +1141,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
1147 * 1141 *
1148 * This is a MASSIVE candidate for optimization. 1142 * This is a MASSIVE candidate for optimization.
1149 */ 1143 */
1150 list_for_each(pos, transport_list) { 1144 list_for_each_entry(transport, transport_list, transports) {
1151 transport = list_entry(pos, struct sctp_transport,
1152 transports);
1153 sctp_check_transmitted(q, &transport->transmitted, 1145 sctp_check_transmitted(q, &transport->transmitted,
1154 transport, sack, highest_new_tsn); 1146 transport, sack, highest_new_tsn);
1155 /* 1147 /*
@@ -1161,9 +1153,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
1161 count_of_newacks ++; 1153 count_of_newacks ++;
1162 } 1154 }
1163 1155
1164 list_for_each(pos, transport_list) { 1156 list_for_each_entry(transport, transport_list, transports) {
1165 transport = list_entry(pos, struct sctp_transport,
1166 transports);
1167 sctp_mark_missing(q, &transport->transmitted, transport, 1157 sctp_mark_missing(q, &transport->transmitted, transport,
1168 highest_new_tsn, count_of_newacks); 1158 highest_new_tsn, count_of_newacks);
1169 } 1159 }
@@ -1206,10 +1196,10 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
1206 sctp_generate_fwdtsn(q, sack_ctsn); 1196 sctp_generate_fwdtsn(q, sack_ctsn);
1207 1197
1208 SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n", 1198 SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n",
1209 __FUNCTION__, sack_ctsn); 1199 __func__, sack_ctsn);
1210 SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, " 1200 SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, "
1211 "%p is 0x%x. Adv peer ack point: 0x%x\n", 1201 "%p is 0x%x. Adv peer ack point: 0x%x\n",
1212 __FUNCTION__, asoc, ctsn, asoc->adv_peer_ack_point); 1202 __func__, asoc, ctsn, asoc->adv_peer_ack_point);
1213 1203
1214 /* See if all chunks are acked. 1204 /* See if all chunks are acked.
1215 * Make sure the empty queue handler will get run later. 1205 * Make sure the empty queue handler will get run later.
@@ -1220,9 +1210,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
1220 if (!q->empty) 1210 if (!q->empty)
1221 goto finish; 1211 goto finish;
1222 1212
1223 list_for_each(pos, transport_list) { 1213 list_for_each_entry(transport, transport_list, transports) {
1224 transport = list_entry(pos, struct sctp_transport,
1225 transports);
1226 q->empty = q->empty && list_empty(&transport->transmitted); 1214 q->empty = q->empty && list_empty(&transport->transmitted);
1227 if (!q->empty) 1215 if (!q->empty)
1228 goto finish; 1216 goto finish;
@@ -1444,7 +1432,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1444 if (tchunk->tsn_gap_acked) { 1432 if (tchunk->tsn_gap_acked) {
1445 SCTP_DEBUG_PRINTK("%s: Receiver reneged on " 1433 SCTP_DEBUG_PRINTK("%s: Receiver reneged on "
1446 "data TSN: 0x%x\n", 1434 "data TSN: 0x%x\n",
1447 __FUNCTION__, 1435 __func__,
1448 tsn); 1436 tsn);
1449 tchunk->tsn_gap_acked = 0; 1437 tchunk->tsn_gap_acked = 0;
1450 1438
@@ -1544,6 +1532,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1544 bytes_acked); 1532 bytes_acked);
1545 1533
1546 transport->flight_size -= bytes_acked; 1534 transport->flight_size -= bytes_acked;
1535 if (transport->flight_size == 0)
1536 transport->partial_bytes_acked = 0;
1547 q->outstanding_bytes -= bytes_acked; 1537 q->outstanding_bytes -= bytes_acked;
1548 } else { 1538 } else {
1549 /* RFC 2960 6.1, sctpimpguide-06 2.15.2 1539 /* RFC 2960 6.1, sctpimpguide-06 2.15.2
@@ -1561,7 +1551,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1561 (sack_ctsn+2 == q->asoc->next_tsn)) { 1551 (sack_ctsn+2 == q->asoc->next_tsn)) {
1562 SCTP_DEBUG_PRINTK("%s: SACK received for zero " 1552 SCTP_DEBUG_PRINTK("%s: SACK received for zero "
1563 "window probe: %u\n", 1553 "window probe: %u\n",
1564 __FUNCTION__, sack_ctsn); 1554 __func__, sack_ctsn);
1565 q->asoc->overall_error_count = 0; 1555 q->asoc->overall_error_count = 0;
1566 transport->error_count = 0; 1556 transport->error_count = 0;
1567 } 1557 }
@@ -1596,14 +1586,12 @@ static void sctp_mark_missing(struct sctp_outq *q,
1596 int count_of_newacks) 1586 int count_of_newacks)
1597{ 1587{
1598 struct sctp_chunk *chunk; 1588 struct sctp_chunk *chunk;
1599 struct list_head *pos;
1600 __u32 tsn; 1589 __u32 tsn;
1601 char do_fast_retransmit = 0; 1590 char do_fast_retransmit = 0;
1602 struct sctp_transport *primary = q->asoc->peer.primary_path; 1591 struct sctp_transport *primary = q->asoc->peer.primary_path;
1603 1592
1604 list_for_each(pos, transmitted_queue) { 1593 list_for_each_entry(chunk, transmitted_queue, transmitted_list) {
1605 1594
1606 chunk = list_entry(pos, struct sctp_chunk, transmitted_list);
1607 tsn = ntohl(chunk->subh.data_hdr->tsn); 1595 tsn = ntohl(chunk->subh.data_hdr->tsn);
1608 1596
1609 /* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all 1597 /* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
@@ -1626,7 +1614,7 @@ static void sctp_mark_missing(struct sctp_outq *q,
1626 1614
1627 SCTP_DEBUG_PRINTK( 1615 SCTP_DEBUG_PRINTK(
1628 "%s: TSN 0x%x missing counter: %d\n", 1616 "%s: TSN 0x%x missing counter: %d\n",
1629 __FUNCTION__, tsn, 1617 __func__, tsn,
1630 chunk->tsn_missing_report); 1618 chunk->tsn_missing_report);
1631 } 1619 }
1632 } 1620 }
@@ -1649,7 +1637,7 @@ static void sctp_mark_missing(struct sctp_outq *q,
1649 1637
1650 SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, " 1638 SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
1651 "ssthresh: %d, flight_size: %d, pba: %d\n", 1639 "ssthresh: %d, flight_size: %d, pba: %d\n",
1652 __FUNCTION__, transport, transport->cwnd, 1640 __func__, transport, transport->cwnd,
1653 transport->ssthresh, transport->flight_size, 1641 transport->ssthresh, transport->flight_size,
1654 transport->partial_bytes_acked); 1642 transport->partial_bytes_acked);
1655 } 1643 }
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 973f1dbc2ec3..0aba759cb9b7 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -124,7 +124,6 @@ void sctp_snmp_proc_exit(void)
124/* Dump local addresses of an association/endpoint. */ 124/* Dump local addresses of an association/endpoint. */
125static void sctp_seq_dump_local_addrs(struct seq_file *seq, struct sctp_ep_common *epb) 125static void sctp_seq_dump_local_addrs(struct seq_file *seq, struct sctp_ep_common *epb)
126{ 126{
127 struct list_head *pos;
128 struct sctp_association *asoc; 127 struct sctp_association *asoc;
129 struct sctp_sockaddr_entry *laddr; 128 struct sctp_sockaddr_entry *laddr;
130 struct sctp_transport *peer; 129 struct sctp_transport *peer;
@@ -137,8 +136,7 @@ static void sctp_seq_dump_local_addrs(struct seq_file *seq, struct sctp_ep_commo
137 primary = &peer->saddr; 136 primary = &peer->saddr;
138 } 137 }
139 138
140 list_for_each(pos, &epb->bind_addr.address_list) { 139 list_for_each_entry(laddr, &epb->bind_addr.address_list, list) {
141 laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
142 addr = &laddr->a; 140 addr = &laddr->a;
143 af = sctp_get_af_specific(addr->sa.sa_family); 141 af = sctp_get_af_specific(addr->sa.sa_family);
144 if (primary && af->cmp_addr(addr, primary)) { 142 if (primary && af->cmp_addr(addr, primary)) {
@@ -151,14 +149,13 @@ static void sctp_seq_dump_local_addrs(struct seq_file *seq, struct sctp_ep_commo
151/* Dump remote addresses of an association. */ 149/* Dump remote addresses of an association. */
152static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_association *assoc) 150static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_association *assoc)
153{ 151{
154 struct list_head *pos;
155 struct sctp_transport *transport; 152 struct sctp_transport *transport;
156 union sctp_addr *addr, *primary; 153 union sctp_addr *addr, *primary;
157 struct sctp_af *af; 154 struct sctp_af *af;
158 155
159 primary = &assoc->peer.primary_addr; 156 primary = &assoc->peer.primary_addr;
160 list_for_each(pos, &assoc->peer.transport_addr_list) { 157 list_for_each_entry(transport, &assoc->peer.transport_addr_list,
161 transport = list_entry(pos, struct sctp_transport, transports); 158 transports) {
162 addr = &transport->ipaddr; 159 addr = &transport->ipaddr;
163 af = sctp_get_af_specific(addr->sa.sa_family); 160 af = sctp_get_af_specific(addr->sa.sa_family);
164 if (af->cmp_addr(addr, primary)) { 161 if (af->cmp_addr(addr, primary)) {
@@ -279,8 +276,10 @@ static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
279 *pos = 0; 276 *pos = 0;
280 277
281 if (*pos == 0) 278 if (*pos == 0)
282 seq_printf(seq, " ASSOC SOCK STY SST ST HBKT ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT " 279 seq_printf(seq, " ASSOC SOCK STY SST ST HBKT "
283 "RPORT LADDRS <-> RADDRS\n"); 280 "ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT "
281 "RPORT LADDRS <-> RADDRS "
282 "HBINT INS OUTS MAXRT T1X T2X RTXC\n");
284 283
285 return (void *)pos; 284 return (void *)pos;
286} 285}
@@ -319,19 +318,25 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
319 assoc = sctp_assoc(epb); 318 assoc = sctp_assoc(epb);
320 sk = epb->sk; 319 sk = epb->sk;
321 seq_printf(seq, 320 seq_printf(seq,
322 "%8p %8p %-3d %-3d %-2d %-4d %4d %8d %8d %7d %5lu %-5d %5d ", 321 "%8p %8p %-3d %-3d %-2d %-4d "
322 "%4d %8d %8d %7d %5lu %-5d %5d ",
323 assoc, sk, sctp_sk(sk)->type, sk->sk_state, 323 assoc, sk, sctp_sk(sk)->type, sk->sk_state,
324 assoc->state, hash, assoc->assoc_id, 324 assoc->state, hash,
325 assoc->assoc_id,
325 assoc->sndbuf_used, 326 assoc->sndbuf_used,
326 atomic_read(&assoc->rmem_alloc), 327 atomic_read(&assoc->rmem_alloc),
327 sock_i_uid(sk), sock_i_ino(sk), 328 sock_i_uid(sk), sock_i_ino(sk),
328 epb->bind_addr.port, 329 epb->bind_addr.port,
329 assoc->peer.port); 330 assoc->peer.port);
330
331 seq_printf(seq, " "); 331 seq_printf(seq, " ");
332 sctp_seq_dump_local_addrs(seq, epb); 332 sctp_seq_dump_local_addrs(seq, epb);
333 seq_printf(seq, "<-> "); 333 seq_printf(seq, "<-> ");
334 sctp_seq_dump_remote_addrs(seq, assoc); 334 sctp_seq_dump_remote_addrs(seq, assoc);
335 seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d ",
336 assoc->hbinterval, assoc->c.sinit_max_instreams,
337 assoc->c.sinit_num_ostreams, assoc->max_retrans,
338 assoc->init_retries, assoc->shutdown_retries,
339 assoc->rtx_data_chunks);
335 seq_printf(seq, "\n"); 340 seq_printf(seq, "\n");
336 } 341 }
337 read_unlock(&head->lock); 342 read_unlock(&head->lock);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index c2dd65d9f38d..0ec234b762c2 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -74,7 +74,7 @@ DEFINE_SPINLOCK(sctp_assocs_id_lock);
74 * the Out-of-the-blue (OOTB) packets. A control sock will be created 74 * the Out-of-the-blue (OOTB) packets. A control sock will be created
75 * for this socket at the initialization time. 75 * for this socket at the initialization time.
76 */ 76 */
77static struct socket *sctp_ctl_socket; 77static struct sock *sctp_ctl_sock;
78 78
79static struct sctp_pf *sctp_pf_inet6_specific; 79static struct sctp_pf *sctp_pf_inet6_specific;
80static struct sctp_pf *sctp_pf_inet_specific; 80static struct sctp_pf *sctp_pf_inet_specific;
@@ -91,7 +91,7 @@ int sysctl_sctp_wmem[3];
91/* Return the address of the control sock. */ 91/* Return the address of the control sock. */
92struct sock *sctp_get_ctl_sock(void) 92struct sock *sctp_get_ctl_sock(void)
93{ 93{
94 return sctp_ctl_socket->sk; 94 return sctp_ctl_sock;
95} 95}
96 96
97/* Set up the proc fs entry for the SCTP protocol. */ 97/* Set up the proc fs entry for the SCTP protocol. */
@@ -363,7 +363,7 @@ static int sctp_v4_addr_valid(union sctp_addr *addr,
363 return 0; 363 return 0;
364 364
365 /* Is this a broadcast address? */ 365 /* Is this a broadcast address? */
366 if (skb && ((struct rtable *)skb->dst)->rt_flags & RTCF_BROADCAST) 366 if (skb && skb->rtable->rt_flags & RTCF_BROADCAST)
367 return 0; 367 return 0;
368 368
369 return 1; 369 return 1;
@@ -451,7 +451,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
451 fl.fl4_src = saddr->v4.sin_addr.s_addr; 451 fl.fl4_src = saddr->v4.sin_addr.s_addr;
452 452
453 SCTP_DEBUG_PRINTK("%s: DST:%u.%u.%u.%u, SRC:%u.%u.%u.%u - ", 453 SCTP_DEBUG_PRINTK("%s: DST:%u.%u.%u.%u, SRC:%u.%u.%u.%u - ",
454 __FUNCTION__, NIPQUAD(fl.fl4_dst), 454 __func__, NIPQUAD(fl.fl4_dst),
455 NIPQUAD(fl.fl4_src)); 455 NIPQUAD(fl.fl4_src));
456 456
457 if (!ip_route_output_key(&init_net, &rt, &fl)) { 457 if (!ip_route_output_key(&init_net, &rt, &fl)) {
@@ -539,7 +539,7 @@ static void sctp_v4_get_saddr(struct sctp_association *asoc,
539/* What interface did this skb arrive on? */ 539/* What interface did this skb arrive on? */
540static int sctp_v4_skb_iif(const struct sk_buff *skb) 540static int sctp_v4_skb_iif(const struct sk_buff *skb)
541{ 541{
542 return ((struct rtable *)skb->dst)->rt_iif; 542 return skb->rtable->rt_iif;
543} 543}
544 544
545/* Was this packet marked by Explicit Congestion Notification? */ 545/* Was this packet marked by Explicit Congestion Notification? */
@@ -554,7 +554,7 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
554{ 554{
555 struct inet_sock *inet = inet_sk(sk); 555 struct inet_sock *inet = inet_sk(sk);
556 struct inet_sock *newinet; 556 struct inet_sock *newinet;
557 struct sock *newsk = sk_alloc(sk->sk_net, PF_INET, GFP_KERNEL, 557 struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL,
558 sk->sk_prot); 558 sk->sk_prot);
559 559
560 if (!newsk) 560 if (!newsk)
@@ -630,6 +630,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
630 struct sctp_sockaddr_entry *temp; 630 struct sctp_sockaddr_entry *temp;
631 int found = 0; 631 int found = 0;
632 632
633 if (dev_net(ifa->ifa_dev->dev) != &init_net)
634 return NOTIFY_DONE;
635
633 switch (ev) { 636 switch (ev) {
634 case NETDEV_UP: 637 case NETDEV_UP:
635 addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); 638 addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
@@ -679,16 +682,13 @@ static int sctp_ctl_sock_init(void)
679 else 682 else
680 family = PF_INET; 683 family = PF_INET;
681 684
682 err = sock_create_kern(family, SOCK_SEQPACKET, IPPROTO_SCTP, 685 err = inet_ctl_sock_create(&sctp_ctl_sock, family,
683 &sctp_ctl_socket); 686 SOCK_SEQPACKET, IPPROTO_SCTP, &init_net);
684 if (err < 0) { 687 if (err < 0) {
685 printk(KERN_ERR 688 printk(KERN_ERR
686 "SCTP: Failed to create the SCTP control socket.\n"); 689 "SCTP: Failed to create the SCTP control socket.\n");
687 return err; 690 return err;
688 } 691 }
689 sctp_ctl_socket->sk->sk_allocation = GFP_ATOMIC;
690 inet_sk(sctp_ctl_socket->sk)->uc_ttl = -1;
691
692 return 0; 692 return 0;
693} 693}
694 694
@@ -828,9 +828,9 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
828{ 828{
829 SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, " 829 SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, "
830 "src:%u.%u.%u.%u, dst:%u.%u.%u.%u\n", 830 "src:%u.%u.%u.%u, dst:%u.%u.%u.%u\n",
831 __FUNCTION__, skb, skb->len, 831 __func__, skb, skb->len,
832 NIPQUAD(((struct rtable *)skb->dst)->rt_src), 832 NIPQUAD(skb->rtable->rt_src),
833 NIPQUAD(((struct rtable *)skb->dst)->rt_dst)); 833 NIPQUAD(skb->rtable->rt_dst));
834 834
835 SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); 835 SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
836 return ip_queue_xmit(skb, ipfragok); 836 return ip_queue_xmit(skb, ipfragok);
@@ -974,24 +974,14 @@ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
974 return 1; 974 return 1;
975} 975}
976 976
977static int __init init_sctp_mibs(void) 977static inline int init_sctp_mibs(void)
978{ 978{
979 sctp_statistics[0] = alloc_percpu(struct sctp_mib); 979 return snmp_mib_init((void**)sctp_statistics, sizeof(struct sctp_mib));
980 if (!sctp_statistics[0])
981 return -ENOMEM;
982 sctp_statistics[1] = alloc_percpu(struct sctp_mib);
983 if (!sctp_statistics[1]) {
984 free_percpu(sctp_statistics[0]);
985 return -ENOMEM;
986 }
987 return 0;
988
989} 980}
990 981
991static void cleanup_sctp_mibs(void) 982static inline void cleanup_sctp_mibs(void)
992{ 983{
993 free_percpu(sctp_statistics[0]); 984 snmp_mib_free((void**)sctp_statistics);
994 free_percpu(sctp_statistics[1]);
995} 985}
996 986
997static void sctp_v4_pf_init(void) 987static void sctp_v4_pf_init(void)
@@ -1286,7 +1276,7 @@ err_v6_add_protocol:
1286 sctp_v6_del_protocol(); 1276 sctp_v6_del_protocol();
1287err_add_protocol: 1277err_add_protocol:
1288 sctp_v4_del_protocol(); 1278 sctp_v4_del_protocol();
1289 sock_release(sctp_ctl_socket); 1279 inet_ctl_sock_destroy(sctp_ctl_sock);
1290err_ctl_sock_init: 1280err_ctl_sock_init:
1291 sctp_v6_protosw_exit(); 1281 sctp_v6_protosw_exit();
1292err_v6_protosw_init: 1282err_v6_protosw_init:
@@ -1330,7 +1320,7 @@ SCTP_STATIC __exit void sctp_exit(void)
1330 sctp_v4_del_protocol(); 1320 sctp_v4_del_protocol();
1331 1321
1332 /* Free the control endpoint. */ 1322 /* Free the control endpoint. */
1333 sock_release(sctp_ctl_socket); 1323 inet_ctl_sock_destroy(sctp_ctl_sock);
1334 1324
1335 /* Free protosw registrations */ 1325 /* Free protosw registrations */
1336 sctp_v6_protosw_exit(); 1326 sctp_v6_protosw_exit();
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 36ebb392472e..81b606424e12 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1782,7 +1782,7 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
1782 const struct sctp_chunk *chunk, 1782 const struct sctp_chunk *chunk,
1783 struct sctp_chunk **errp) 1783 struct sctp_chunk **errp)
1784{ 1784{
1785 char error[] = "The following parameter had invalid length:"; 1785 static const char error[] = "The following parameter had invalid length:";
1786 size_t payload_len = WORD_ROUND(sizeof(error)) + 1786 size_t payload_len = WORD_ROUND(sizeof(error)) +
1787 sizeof(sctp_paramhdr_t); 1787 sizeof(sctp_paramhdr_t);
1788 1788
@@ -2269,8 +2269,8 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
2269 * high (for example, implementations MAY use the size of the receiver 2269 * high (for example, implementations MAY use the size of the receiver
2270 * advertised window). 2270 * advertised window).
2271 */ 2271 */
2272 list_for_each(pos, &asoc->peer.transport_addr_list) { 2272 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
2273 transport = list_entry(pos, struct sctp_transport, transports); 2273 transports) {
2274 transport->ssthresh = asoc->peer.i.a_rwnd; 2274 transport->ssthresh = asoc->peer.i.a_rwnd;
2275 } 2275 }
2276 2276
@@ -3066,7 +3066,6 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
3066 union sctp_addr addr; 3066 union sctp_addr addr;
3067 struct sctp_bind_addr *bp = &asoc->base.bind_addr; 3067 struct sctp_bind_addr *bp = &asoc->base.bind_addr;
3068 union sctp_addr_param *addr_param; 3068 union sctp_addr_param *addr_param;
3069 struct list_head *pos;
3070 struct sctp_transport *transport; 3069 struct sctp_transport *transport;
3071 struct sctp_sockaddr_entry *saddr; 3070 struct sctp_sockaddr_entry *saddr;
3072 int retval = 0; 3071 int retval = 0;
@@ -3094,9 +3093,8 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
3094 local_bh_disable(); 3093 local_bh_disable();
3095 retval = sctp_del_bind_addr(bp, &addr); 3094 retval = sctp_del_bind_addr(bp, &addr);
3096 local_bh_enable(); 3095 local_bh_enable();
3097 list_for_each(pos, &asoc->peer.transport_addr_list) { 3096 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
3098 transport = list_entry(pos, struct sctp_transport, 3097 transports) {
3099 transports);
3100 dst_release(transport->dst); 3098 dst_release(transport->dst);
3101 sctp_transport_route(transport, NULL, 3099 sctp_transport_route(transport, NULL,
3102 sctp_sk(asoc->base.sk)); 3100 sctp_sk(asoc->base.sk));
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index a4763fd24fd8..23a9f1a95b7d 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -243,7 +243,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
243 243
244 sctp_bh_lock_sock(asoc->base.sk); 244 sctp_bh_lock_sock(asoc->base.sk);
245 if (sock_owned_by_user(asoc->base.sk)) { 245 if (sock_owned_by_user(asoc->base.sk)) {
246 SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__); 246 SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);
247 247
248 /* Try again later. */ 248 /* Try again later. */
249 if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20))) 249 if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
@@ -283,7 +283,7 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
283 sctp_bh_lock_sock(asoc->base.sk); 283 sctp_bh_lock_sock(asoc->base.sk);
284 if (sock_owned_by_user(asoc->base.sk)) { 284 if (sock_owned_by_user(asoc->base.sk)) {
285 SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n", 285 SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n",
286 __FUNCTION__, 286 __func__,
287 timeout_type); 287 timeout_type);
288 288
289 /* Try again later. */ 289 /* Try again later. */
@@ -361,7 +361,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
361 361
362 sctp_bh_lock_sock(asoc->base.sk); 362 sctp_bh_lock_sock(asoc->base.sk);
363 if (sock_owned_by_user(asoc->base.sk)) { 363 if (sock_owned_by_user(asoc->base.sk)) {
364 SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__); 364 SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);
365 365
366 /* Try again later. */ 366 /* Try again later. */
367 if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20))) 367 if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
@@ -545,14 +545,12 @@ static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
545 struct sctp_association *asoc) 545 struct sctp_association *asoc)
546{ 546{
547 struct sctp_transport *t; 547 struct sctp_transport *t;
548 struct list_head *pos;
549 548
550 /* Start a heartbeat timer for each transport on the association. 549 /* Start a heartbeat timer for each transport on the association.
551 * hold a reference on the transport to make sure none of 550 * hold a reference on the transport to make sure none of
552 * the needed data structures go away. 551 * the needed data structures go away.
553 */ 552 */
554 list_for_each(pos, &asoc->peer.transport_addr_list) { 553 list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
555 t = list_entry(pos, struct sctp_transport, transports);
556 554
557 if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) 555 if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
558 sctp_transport_hold(t); 556 sctp_transport_hold(t);
@@ -563,12 +561,11 @@ static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
563 struct sctp_association *asoc) 561 struct sctp_association *asoc)
564{ 562{
565 struct sctp_transport *t; 563 struct sctp_transport *t;
566 struct list_head *pos;
567 564
568 /* Stop all heartbeat timers. */ 565 /* Stop all heartbeat timers. */
569 566
570 list_for_each(pos, &asoc->peer.transport_addr_list) { 567 list_for_each_entry(t, &asoc->peer.transport_addr_list,
571 t = list_entry(pos, struct sctp_transport, transports); 568 transports) {
572 if (del_timer(&t->hb_timer)) 569 if (del_timer(&t->hb_timer))
573 sctp_transport_put(t); 570 sctp_transport_put(t);
574 } 571 }
@@ -579,10 +576,9 @@ static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
579 struct sctp_association *asoc) 576 struct sctp_association *asoc)
580{ 577{
581 struct sctp_transport *t; 578 struct sctp_transport *t;
582 struct list_head *pos;
583 579
584 list_for_each(pos, &asoc->peer.transport_addr_list) { 580 list_for_each_entry(t, &asoc->peer.transport_addr_list,
585 t = list_entry(pos, struct sctp_transport, transports); 581 transports) {
586 if (timer_pending(&t->T3_rtx_timer) && 582 if (timer_pending(&t->T3_rtx_timer) &&
587 del_timer(&t->T3_rtx_timer)) { 583 del_timer(&t->T3_rtx_timer)) {
588 sctp_transport_put(t); 584 sctp_transport_put(t);
@@ -593,7 +589,6 @@ static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
593 589
594/* Helper function to update the heartbeat timer. */ 590/* Helper function to update the heartbeat timer. */
595static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds, 591static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
596 struct sctp_association *asoc,
597 struct sctp_transport *t) 592 struct sctp_transport *t)
598{ 593{
599 /* Update the heartbeat timer. */ 594 /* Update the heartbeat timer. */
@@ -1065,7 +1060,6 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1065 struct sctp_chunk *new_obj; 1060 struct sctp_chunk *new_obj;
1066 struct sctp_chunk *chunk = NULL; 1061 struct sctp_chunk *chunk = NULL;
1067 struct sctp_packet *packet; 1062 struct sctp_packet *packet;
1068 struct list_head *pos;
1069 struct timer_list *timer; 1063 struct timer_list *timer;
1070 unsigned long timeout; 1064 unsigned long timeout;
1071 struct sctp_transport *t; 1065 struct sctp_transport *t;
@@ -1397,9 +1391,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1397 /* If we've sent any data bundled with 1391 /* If we've sent any data bundled with
1398 * COOKIE-ECHO we need to resend. 1392 * COOKIE-ECHO we need to resend.
1399 */ 1393 */
1400 list_for_each(pos, &asoc->peer.transport_addr_list) { 1394 list_for_each_entry(t, &asoc->peer.transport_addr_list,
1401 t = list_entry(pos, struct sctp_transport, 1395 transports) {
1402 transports);
1403 sctp_retransmit_mark(&asoc->outqueue, t, 1396 sctp_retransmit_mark(&asoc->outqueue, t,
1404 SCTP_RTXR_T1_RTX); 1397 SCTP_RTXR_T1_RTX);
1405 } 1398 }
@@ -1457,7 +1450,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1457 1450
1458 case SCTP_CMD_HB_TIMER_UPDATE: 1451 case SCTP_CMD_HB_TIMER_UPDATE:
1459 t = cmd->obj.transport; 1452 t = cmd->obj.transport;
1460 sctp_cmd_hb_timer_update(commands, asoc, t); 1453 sctp_cmd_hb_timer_update(commands, t);
1461 break; 1454 break;
1462 1455
1463 case SCTP_CMD_HB_TIMERS_STOP: 1456 case SCTP_CMD_HB_TIMERS_STOP:
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 07194c2a32df..0c9d5a6950fe 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1124,7 +1124,7 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
1124 printk(KERN_WARNING 1124 printk(KERN_WARNING
1125 "%s association %p could not find address " 1125 "%s association %p could not find address "
1126 NIP6_FMT "\n", 1126 NIP6_FMT "\n",
1127 __FUNCTION__, 1127 __func__,
1128 asoc, 1128 asoc,
1129 NIP6(from_addr.v6.sin6_addr)); 1129 NIP6(from_addr.v6.sin6_addr));
1130 } else { 1130 } else {
@@ -1132,7 +1132,7 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
1132 printk(KERN_WARNING 1132 printk(KERN_WARNING
1133 "%s association %p could not find address " 1133 "%s association %p could not find address "
1134 NIPQUAD_FMT "\n", 1134 NIPQUAD_FMT "\n",
1135 __FUNCTION__, 1135 __func__,
1136 asoc, 1136 asoc,
1137 NIPQUAD(from_addr.v4.sin_addr.s_addr)); 1137 NIPQUAD(from_addr.v4.sin_addr.s_addr));
1138 } 1138 }
@@ -1150,7 +1150,7 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
1150 time_after(jiffies, hbinfo->sent_at + max_interval)) { 1150 time_after(jiffies, hbinfo->sent_at + max_interval)) {
1151 SCTP_DEBUG_PRINTK("%s: HEARTBEAT ACK with invalid timestamp " 1151 SCTP_DEBUG_PRINTK("%s: HEARTBEAT ACK with invalid timestamp "
1152 "received for transport: %p\n", 1152 "received for transport: %p\n",
1153 __FUNCTION__, link); 1153 __func__, link);
1154 return SCTP_DISPOSITION_DISCARD; 1154 return SCTP_DISPOSITION_DISCARD;
1155 } 1155 }
1156 1156
@@ -1226,7 +1226,6 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
1226 sctp_cmd_seq_t *commands) 1226 sctp_cmd_seq_t *commands)
1227{ 1227{
1228 struct sctp_transport *new_addr, *addr; 1228 struct sctp_transport *new_addr, *addr;
1229 struct list_head *pos, *pos2;
1230 int found; 1229 int found;
1231 1230
1232 /* Implementor's Guide - Sectin 5.2.2 1231 /* Implementor's Guide - Sectin 5.2.2
@@ -1243,12 +1242,11 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
1243 new_addr = NULL; 1242 new_addr = NULL;
1244 found = 0; 1243 found = 0;
1245 1244
1246 list_for_each(pos, &new_asoc->peer.transport_addr_list) { 1245 list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list,
1247 new_addr = list_entry(pos, struct sctp_transport, transports); 1246 transports) {
1248 found = 0; 1247 found = 0;
1249 list_for_each(pos2, &asoc->peer.transport_addr_list) { 1248 list_for_each_entry(addr, &asoc->peer.transport_addr_list,
1250 addr = list_entry(pos2, struct sctp_transport, 1249 transports) {
1251 transports);
1252 if (sctp_cmp_addr_exact(&new_addr->ipaddr, 1250 if (sctp_cmp_addr_exact(&new_addr->ipaddr,
1253 &addr->ipaddr)) { 1251 &addr->ipaddr)) {
1254 found = 1; 1252 found = 1;
@@ -3135,12 +3133,8 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
3135 if (!ev) 3133 if (!ev)
3136 goto nomem; 3134 goto nomem;
3137 3135
3138 if (!sctp_add_cmd(commands, SCTP_CMD_EVENT_ULP, 3136 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
3139 SCTP_ULPEVENT(ev))) { 3137 SCTP_ULPEVENT(ev));
3140 sctp_ulpevent_free(ev);
3141 goto nomem;
3142 }
3143
3144 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR, 3138 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
3145 SCTP_CHUNK(chunk)); 3139 SCTP_CHUNK(chunk));
3146 } 3140 }
@@ -3668,7 +3662,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
3668 skb_pull(chunk->skb, len); 3662 skb_pull(chunk->skb, len);
3669 3663
3670 tsn = ntohl(fwdtsn_hdr->new_cum_tsn); 3664 tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
3671 SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __FUNCTION__, tsn); 3665 SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __func__, tsn);
3672 3666
3673 /* The TSN is too high--silently discard the chunk and count on it 3667 /* The TSN is too high--silently discard the chunk and count on it
3674 * getting retransmitted later. 3668 * getting retransmitted later.
@@ -3728,7 +3722,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
3728 skb_pull(chunk->skb, len); 3722 skb_pull(chunk->skb, len);
3729 3723
3730 tsn = ntohl(fwdtsn_hdr->new_cum_tsn); 3724 tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
3731 SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __FUNCTION__, tsn); 3725 SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __func__, tsn);
3732 3726
3733 /* The TSN is too high--silently discard the chunk and count on it 3727 /* The TSN is too high--silently discard the chunk and count on it
3734 * getting retransmitted later. 3728 * getting retransmitted later.
@@ -4237,7 +4231,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
4237 void *arg, 4231 void *arg,
4238 sctp_cmd_seq_t *commands) 4232 sctp_cmd_seq_t *commands)
4239{ 4233{
4240 char err_str[]="The following chunk had invalid length:"; 4234 static const char err_str[]="The following chunk had invalid length:";
4241 4235
4242 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str, 4236 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
4243 sizeof(err_str)); 4237 sizeof(err_str));
@@ -4254,7 +4248,7 @@ static sctp_disposition_t sctp_sf_violation_paramlen(
4254 const sctp_subtype_t type, 4248 const sctp_subtype_t type,
4255 void *arg, 4249 void *arg,
4256 sctp_cmd_seq_t *commands) { 4250 sctp_cmd_seq_t *commands) {
4257 char err_str[] = "The following parameter had invalid length:"; 4251 static const char err_str[] = "The following parameter had invalid length:";
4258 4252
4259 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str, 4253 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
4260 sizeof(err_str)); 4254 sizeof(err_str));
@@ -4273,7 +4267,7 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
4273 void *arg, 4267 void *arg,
4274 sctp_cmd_seq_t *commands) 4268 sctp_cmd_seq_t *commands)
4275{ 4269{
4276 char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:"; 4270 static const char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:";
4277 4271
4278 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str, 4272 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
4279 sizeof(err_str)); 4273 sizeof(err_str));
@@ -4292,7 +4286,7 @@ static sctp_disposition_t sctp_sf_violation_chunk(
4292 void *arg, 4286 void *arg,
4293 sctp_cmd_seq_t *commands) 4287 sctp_cmd_seq_t *commands)
4294{ 4288{
4295 char err_str[]="The following chunk violates protocol:"; 4289 static const char err_str[]="The following chunk violates protocol:";
4296 4290
4297 if (!asoc) 4291 if (!asoc)
4298 return sctp_sf_violation(ep, asoc, type, arg, commands); 4292 return sctp_sf_violation(ep, asoc, type, arg, commands);
@@ -5331,6 +5325,8 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
5331 SCTP_DEBUG_PRINTK("Timer T2 expired.\n"); 5325 SCTP_DEBUG_PRINTK("Timer T2 expired.\n");
5332 SCTP_INC_STATS(SCTP_MIB_T2_SHUTDOWN_EXPIREDS); 5326 SCTP_INC_STATS(SCTP_MIB_T2_SHUTDOWN_EXPIREDS);
5333 5327
5328 ((struct sctp_association *)asoc)->shutdown_retries++;
5329
5334 if (asoc->overall_error_count >= asoc->max_retrans) { 5330 if (asoc->overall_error_count >= asoc->max_retrans) {
5335 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 5331 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
5336 SCTP_ERROR(ETIMEDOUT)); 5332 SCTP_ERROR(ETIMEDOUT));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 998e63a31311..e7e3baf7009e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -513,7 +513,6 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
513 union sctp_addr saveaddr; 513 union sctp_addr saveaddr;
514 void *addr_buf; 514 void *addr_buf;
515 struct sctp_af *af; 515 struct sctp_af *af;
516 struct list_head *pos;
517 struct list_head *p; 516 struct list_head *p;
518 int i; 517 int i;
519 int retval = 0; 518 int retval = 0;
@@ -525,10 +524,9 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
525 ep = sp->ep; 524 ep = sp->ep;
526 525
527 SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n", 526 SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n",
528 __FUNCTION__, sk, addrs, addrcnt); 527 __func__, sk, addrs, addrcnt);
529 528
530 list_for_each(pos, &ep->asocs) { 529 list_for_each_entry(asoc, &ep->asocs, asocs) {
531 asoc = list_entry(pos, struct sctp_association, asocs);
532 530
533 if (!asoc->peer.asconf_capable) 531 if (!asoc->peer.asconf_capable)
534 continue; 532 continue;
@@ -699,7 +697,6 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
699 union sctp_addr *laddr; 697 union sctp_addr *laddr;
700 void *addr_buf; 698 void *addr_buf;
701 struct sctp_af *af; 699 struct sctp_af *af;
702 struct list_head *pos, *pos1;
703 struct sctp_sockaddr_entry *saddr; 700 struct sctp_sockaddr_entry *saddr;
704 int i; 701 int i;
705 int retval = 0; 702 int retval = 0;
@@ -711,10 +708,9 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
711 ep = sp->ep; 708 ep = sp->ep;
712 709
713 SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n", 710 SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n",
714 __FUNCTION__, sk, addrs, addrcnt); 711 __func__, sk, addrs, addrcnt);
715 712
716 list_for_each(pos, &ep->asocs) { 713 list_for_each_entry(asoc, &ep->asocs, asocs) {
717 asoc = list_entry(pos, struct sctp_association, asocs);
718 714
719 if (!asoc->peer.asconf_capable) 715 if (!asoc->peer.asconf_capable)
720 continue; 716 continue;
@@ -787,9 +783,8 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
787 * as some of the addresses in the bind address list are 783 * as some of the addresses in the bind address list are
788 * about to be deleted and cannot be used as source addresses. 784 * about to be deleted and cannot be used as source addresses.
789 */ 785 */
790 list_for_each(pos1, &asoc->peer.transport_addr_list) { 786 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
791 transport = list_entry(pos1, struct sctp_transport, 787 transports) {
792 transports);
793 dst_release(transport->dst); 788 dst_release(transport->dst);
794 sctp_transport_route(transport, NULL, 789 sctp_transport_route(transport, NULL,
795 sctp_sk(asoc->base.sk)); 790 sctp_sk(asoc->base.sk));
@@ -1197,7 +1192,7 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
1197 struct sockaddr *kaddrs; 1192 struct sockaddr *kaddrs;
1198 1193
1199 SCTP_DEBUG_PRINTK("%s - sk %p addrs %p addrs_size %d\n", 1194 SCTP_DEBUG_PRINTK("%s - sk %p addrs %p addrs_size %d\n",
1200 __FUNCTION__, sk, addrs, addrs_size); 1195 __func__, sk, addrs, addrs_size);
1201 1196
1202 if (unlikely(addrs_size <= 0)) 1197 if (unlikely(addrs_size <= 0))
1203 return -EINVAL; 1198 return -EINVAL;
@@ -1397,7 +1392,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1397 long timeo; 1392 long timeo;
1398 __u16 sinfo_flags = 0; 1393 __u16 sinfo_flags = 0;
1399 struct sctp_datamsg *datamsg; 1394 struct sctp_datamsg *datamsg;
1400 struct list_head *pos;
1401 int msg_flags = msg->msg_flags; 1395 int msg_flags = msg->msg_flags;
1402 1396
1403 SCTP_DEBUG_PRINTK("sctp_sendmsg(sk: %p, msg: %p, msg_len: %zu)\n", 1397 SCTP_DEBUG_PRINTK("sctp_sendmsg(sk: %p, msg: %p, msg_len: %zu)\n",
@@ -1727,9 +1721,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1727 } 1721 }
1728 1722
1729 /* Now send the (possibly) fragmented message. */ 1723 /* Now send the (possibly) fragmented message. */
1730 list_for_each(pos, &datamsg->chunks) { 1724 list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
1731 chunk = list_entry(pos, struct sctp_chunk, frag_list); 1725 sctp_chunk_hold(chunk);
1732 sctp_datamsg_track(chunk);
1733 1726
1734 /* Do accounting for the write space. */ 1727 /* Do accounting for the write space. */
1735 sctp_set_owner_w(chunk); 1728 sctp_set_owner_w(chunk);
@@ -1748,7 +1741,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1748 SCTP_DEBUG_PRINTK("We sent primitively.\n"); 1741 SCTP_DEBUG_PRINTK("We sent primitively.\n");
1749 } 1742 }
1750 1743
1751 sctp_datamsg_free(datamsg); 1744 sctp_datamsg_put(datamsg);
1752 if (err) 1745 if (err)
1753 goto out_free; 1746 goto out_free;
1754 else 1747 else
@@ -2301,11 +2294,8 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
2301 * transport. 2294 * transport.
2302 */ 2295 */
2303 if (!trans && asoc) { 2296 if (!trans && asoc) {
2304 struct list_head *pos; 2297 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
2305 2298 transports) {
2306 list_for_each(pos, &asoc->peer.transport_addr_list) {
2307 trans = list_entry(pos, struct sctp_transport,
2308 transports);
2309 sctp_apply_peer_addr_params(&params, trans, asoc, sp, 2299 sctp_apply_peer_addr_params(&params, trans, asoc, sp,
2310 hb_change, pmtud_change, 2300 hb_change, pmtud_change,
2311 sackdelay_change); 2301 sackdelay_change);
@@ -2396,11 +2386,8 @@ static int sctp_setsockopt_delayed_ack_time(struct sock *sk,
2396 2386
2397 /* If change is for association, also apply to each transport. */ 2387 /* If change is for association, also apply to each transport. */
2398 if (asoc) { 2388 if (asoc) {
2399 struct list_head *pos; 2389 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
2400 2390 transports) {
2401 list_for_each(pos, &asoc->peer.transport_addr_list) {
2402 trans = list_entry(pos, struct sctp_transport,
2403 transports);
2404 if (params.assoc_value) { 2391 if (params.assoc_value) {
2405 trans->sackdelay = 2392 trans->sackdelay =
2406 msecs_to_jiffies(params.assoc_value); 2393 msecs_to_jiffies(params.assoc_value);
@@ -2632,13 +2619,10 @@ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, int o
2632 if (assocparams.sasoc_asocmaxrxt != 0) { 2619 if (assocparams.sasoc_asocmaxrxt != 0) {
2633 __u32 path_sum = 0; 2620 __u32 path_sum = 0;
2634 int paths = 0; 2621 int paths = 0;
2635 struct list_head *pos;
2636 struct sctp_transport *peer_addr; 2622 struct sctp_transport *peer_addr;
2637 2623
2638 list_for_each(pos, &asoc->peer.transport_addr_list) { 2624 list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list,
2639 peer_addr = list_entry(pos, 2625 transports) {
2640 struct sctp_transport,
2641 transports);
2642 path_sum += peer_addr->pathmaxrxt; 2626 path_sum += peer_addr->pathmaxrxt;
2643 paths++; 2627 paths++;
2644 } 2628 }
@@ -2716,7 +2700,6 @@ static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, int op
2716static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, int optlen) 2700static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, int optlen)
2717{ 2701{
2718 struct sctp_association *asoc; 2702 struct sctp_association *asoc;
2719 struct list_head *pos;
2720 struct sctp_sock *sp = sctp_sk(sk); 2703 struct sctp_sock *sp = sctp_sk(sk);
2721 int val; 2704 int val;
2722 2705
@@ -2729,8 +2712,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, int optl
2729 sp->user_frag = val; 2712 sp->user_frag = val;
2730 2713
2731 /* Update the frag_point of the existing associations. */ 2714 /* Update the frag_point of the existing associations. */
2732 list_for_each(pos, &(sp->ep->asocs)) { 2715 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
2733 asoc = list_entry(pos, struct sctp_association, asocs);
2734 asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu); 2716 asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu);
2735 } 2717 }
2736 2718
@@ -3302,7 +3284,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *addr,
3302 sctp_lock_sock(sk); 3284 sctp_lock_sock(sk);
3303 3285
3304 SCTP_DEBUG_PRINTK("%s - sk: %p, sockaddr: %p, addr_len: %d\n", 3286 SCTP_DEBUG_PRINTK("%s - sk: %p, sockaddr: %p, addr_len: %d\n",
3305 __FUNCTION__, sk, addr, addr_len); 3287 __func__, sk, addr, addr_len);
3306 3288
3307 /* Validate addr_len before calling common connect/connectx routine. */ 3289 /* Validate addr_len before calling common connect/connectx routine. */
3308 af = sctp_get_af_specific(addr->sa_family); 3290 af = sctp_get_af_specific(addr->sa_family);
@@ -3823,7 +3805,7 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval
3823 goto out; 3805 goto out;
3824 } 3806 }
3825 3807
3826 SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p\n", __FUNCTION__, sk, asoc); 3808 SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p\n", __func__, sk, asoc);
3827 3809
3828 retval = sctp_do_peeloff(asoc, &newsock); 3810 retval = sctp_do_peeloff(asoc, &newsock);
3829 if (retval < 0) 3811 if (retval < 0)
@@ -3837,7 +3819,7 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval
3837 } 3819 }
3838 3820
3839 SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p newsk: %p sd: %d\n", 3821 SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p newsk: %p sd: %d\n",
3840 __FUNCTION__, sk, asoc, newsock->sk, retval); 3822 __func__, sk, asoc, newsock->sk, retval);
3841 3823
3842 /* Return the fd mapped to the new socket. */ 3824 /* Return the fd mapped to the new socket. */
3843 peeloff.sd = retval; 3825 peeloff.sd = retval;
@@ -4151,7 +4133,6 @@ static int sctp_getsockopt_peer_addrs_old(struct sock *sk, int len,
4151 int __user *optlen) 4133 int __user *optlen)
4152{ 4134{
4153 struct sctp_association *asoc; 4135 struct sctp_association *asoc;
4154 struct list_head *pos;
4155 int cnt = 0; 4136 int cnt = 0;
4156 struct sctp_getaddrs_old getaddrs; 4137 struct sctp_getaddrs_old getaddrs;
4157 struct sctp_transport *from; 4138 struct sctp_transport *from;
@@ -4176,8 +4157,8 @@ static int sctp_getsockopt_peer_addrs_old(struct sock *sk, int len,
4176 return -EINVAL; 4157 return -EINVAL;
4177 4158
4178 to = (void __user *)getaddrs.addrs; 4159 to = (void __user *)getaddrs.addrs;
4179 list_for_each(pos, &asoc->peer.transport_addr_list) { 4160 list_for_each_entry(from, &asoc->peer.transport_addr_list,
4180 from = list_entry(pos, struct sctp_transport, transports); 4161 transports) {
4181 memcpy(&temp, &from->ipaddr, sizeof(temp)); 4162 memcpy(&temp, &from->ipaddr, sizeof(temp));
4182 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); 4163 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
4183 addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len; 4164 addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len;
@@ -4200,7 +4181,6 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
4200 char __user *optval, int __user *optlen) 4181 char __user *optval, int __user *optlen)
4201{ 4182{
4202 struct sctp_association *asoc; 4183 struct sctp_association *asoc;
4203 struct list_head *pos;
4204 int cnt = 0; 4184 int cnt = 0;
4205 struct sctp_getaddrs getaddrs; 4185 struct sctp_getaddrs getaddrs;
4206 struct sctp_transport *from; 4186 struct sctp_transport *from;
@@ -4225,8 +4205,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
4225 to = optval + offsetof(struct sctp_getaddrs,addrs); 4205 to = optval + offsetof(struct sctp_getaddrs,addrs);
4226 space_left = len - offsetof(struct sctp_getaddrs,addrs); 4206 space_left = len - offsetof(struct sctp_getaddrs,addrs);
4227 4207
4228 list_for_each(pos, &asoc->peer.transport_addr_list) { 4208 list_for_each_entry(from, &asoc->peer.transport_addr_list,
4229 from = list_entry(pos, struct sctp_transport, transports); 4209 transports) {
4230 memcpy(&temp, &from->ipaddr, sizeof(temp)); 4210 memcpy(&temp, &from->ipaddr, sizeof(temp));
4231 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); 4211 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
4232 addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len; 4212 addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len;
@@ -5761,8 +5741,8 @@ static struct sctp_bind_bucket *sctp_bucket_create(
5761 struct sctp_bind_bucket *pp; 5741 struct sctp_bind_bucket *pp;
5762 5742
5763 pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); 5743 pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
5764 SCTP_DBG_OBJCNT_INC(bind_bucket);
5765 if (pp) { 5744 if (pp) {
5745 SCTP_DBG_OBJCNT_INC(bind_bucket);
5766 pp->port = snum; 5746 pp->port = snum;
5767 pp->fastreuse = 0; 5747 pp->fastreuse = 0;
5768 INIT_HLIST_HEAD(&pp->owner); 5748 INIT_HLIST_HEAD(&pp->owner);
@@ -6194,11 +6174,9 @@ do_nonblock:
6194void sctp_write_space(struct sock *sk) 6174void sctp_write_space(struct sock *sk)
6195{ 6175{
6196 struct sctp_association *asoc; 6176 struct sctp_association *asoc;
6197 struct list_head *pos;
6198 6177
6199 /* Wake up the tasks in each wait queue. */ 6178 /* Wake up the tasks in each wait queue. */
6200 list_for_each(pos, &((sctp_sk(sk))->ep->asocs)) { 6179 list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) {
6201 asoc = list_entry(pos, struct sctp_association, asocs);
6202 __sctp_write_space(asoc); 6180 __sctp_write_space(asoc);
6203 } 6181 }
6204} 6182}
@@ -6234,7 +6212,7 @@ static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
6234 long current_timeo = *timeo_p; 6212 long current_timeo = *timeo_p;
6235 DEFINE_WAIT(wait); 6213 DEFINE_WAIT(wait);
6236 6214
6237 SCTP_DEBUG_PRINTK("%s: asoc=%p, timeo=%ld\n", __FUNCTION__, asoc, 6215 SCTP_DEBUG_PRINTK("%s: asoc=%p, timeo=%ld\n", __func__, asoc,
6238 (long)(*timeo_p)); 6216 (long)(*timeo_p));
6239 6217
6240 /* Increment the association's refcnt. */ 6218 /* Increment the association's refcnt. */
@@ -6514,8 +6492,6 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
6514} 6492}
6515 6493
6516 6494
6517DEFINE_PROTO_INUSE(sctp)
6518
6519/* This proto struct describes the ULP interface for SCTP. */ 6495/* This proto struct describes the ULP interface for SCTP. */
6520struct proto sctp_prot = { 6496struct proto sctp_prot = {
6521 .name = "SCTP", 6497 .name = "SCTP",
@@ -6545,11 +6521,9 @@ struct proto sctp_prot = {
6545 .enter_memory_pressure = sctp_enter_memory_pressure, 6521 .enter_memory_pressure = sctp_enter_memory_pressure,
6546 .memory_allocated = &sctp_memory_allocated, 6522 .memory_allocated = &sctp_memory_allocated,
6547 .sockets_allocated = &sctp_sockets_allocated, 6523 .sockets_allocated = &sctp_sockets_allocated,
6548 REF_PROTO_INUSE(sctp)
6549}; 6524};
6550 6525
6551#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 6526#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
6552DEFINE_PROTO_INUSE(sctpv6)
6553 6527
6554struct proto sctpv6_prot = { 6528struct proto sctpv6_prot = {
6555 .name = "SCTPv6", 6529 .name = "SCTPv6",
@@ -6579,6 +6553,5 @@ struct proto sctpv6_prot = {
6579 .enter_memory_pressure = sctp_enter_memory_pressure, 6553 .enter_memory_pressure = sctp_enter_memory_pressure,
6580 .memory_allocated = &sctp_memory_allocated, 6554 .memory_allocated = &sctp_memory_allocated,
6581 .sockets_allocated = &sctp_sockets_allocated, 6555 .sockets_allocated = &sctp_sockets_allocated,
6582 REF_PROTO_INUSE(sctpv6)
6583}; 6556};
6584#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ 6557#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index d9f8af852b56..f4938f6c5abe 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -260,7 +260,7 @@ void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
260 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { 260 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
261 printk(KERN_WARNING "%s: Reported pmtu %d too low, " 261 printk(KERN_WARNING "%s: Reported pmtu %d too low, "
262 "using default minimum of %d\n", 262 "using default minimum of %d\n",
263 __FUNCTION__, pmtu, 263 __func__, pmtu,
264 SCTP_DEFAULT_MINSEGMENT); 264 SCTP_DEFAULT_MINSEGMENT);
265 /* Use default minimum segment size and disable 265 /* Use default minimum segment size and disable
266 * pmtu discovery on this transport. 266 * pmtu discovery on this transport.
@@ -388,7 +388,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
388 tp->rto_pending = 0; 388 tp->rto_pending = 0;
389 389
390 SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d " 390 SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d "
391 "rttvar: %d, rto: %ld\n", __FUNCTION__, 391 "rttvar: %d, rto: %ld\n", __func__,
392 tp, rtt, tp->srtt, tp->rttvar, tp->rto); 392 tp, rtt, tp->srtt, tp->rttvar, tp->rto);
393} 393}
394 394
@@ -434,7 +434,7 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport,
434 SCTP_DEBUG_PRINTK("%s: SLOW START: transport: %p, " 434 SCTP_DEBUG_PRINTK("%s: SLOW START: transport: %p, "
435 "bytes_acked: %d, cwnd: %d, ssthresh: %d, " 435 "bytes_acked: %d, cwnd: %d, ssthresh: %d, "
436 "flight_size: %d, pba: %d\n", 436 "flight_size: %d, pba: %d\n",
437 __FUNCTION__, 437 __func__,
438 transport, bytes_acked, cwnd, 438 transport, bytes_acked, cwnd,
439 ssthresh, flight_size, pba); 439 ssthresh, flight_size, pba);
440 } else { 440 } else {
@@ -460,7 +460,7 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport,
460 SCTP_DEBUG_PRINTK("%s: CONGESTION AVOIDANCE: " 460 SCTP_DEBUG_PRINTK("%s: CONGESTION AVOIDANCE: "
461 "transport: %p, bytes_acked: %d, cwnd: %d, " 461 "transport: %p, bytes_acked: %d, cwnd: %d, "
462 "ssthresh: %d, flight_size: %d, pba: %d\n", 462 "ssthresh: %d, flight_size: %d, pba: %d\n",
463 __FUNCTION__, 463 __func__,
464 transport, bytes_acked, cwnd, 464 transport, bytes_acked, cwnd,
465 ssthresh, flight_size, pba); 465 ssthresh, flight_size, pba);
466 } 466 }
@@ -546,7 +546,7 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
546 546
547 transport->partial_bytes_acked = 0; 547 transport->partial_bytes_acked = 0;
548 SCTP_DEBUG_PRINTK("%s: transport: %p reason: %d cwnd: " 548 SCTP_DEBUG_PRINTK("%s: transport: %p reason: %d cwnd: "
549 "%d ssthresh: %d\n", __FUNCTION__, 549 "%d ssthresh: %d\n", __func__,
550 transport, reason, 550 transport, reason,
551 transport->cwnd, transport->ssthresh); 551 transport->cwnd, transport->ssthresh);
552} 552}
diff --git a/net/socket.c b/net/socket.c
index 9d3fbfbc8535..9b5c917f8a6b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -857,7 +857,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
857 857
858 sock = file->private_data; 858 sock = file->private_data;
859 sk = sock->sk; 859 sk = sock->sk;
860 net = sk->sk_net; 860 net = sock_net(sk);
861 if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) { 861 if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) {
862 err = dev_ioctl(net, cmd, argp); 862 err = dev_ioctl(net, cmd, argp);
863 } else 863 } else
@@ -1375,7 +1375,7 @@ asmlinkage long sys_listen(int fd, int backlog)
1375 1375
1376 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1376 sock = sockfd_lookup_light(fd, &err, &fput_needed);
1377 if (sock) { 1377 if (sock) {
1378 somaxconn = sock->sk->sk_net->sysctl_somaxconn; 1378 somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
1379 if ((unsigned)backlog > somaxconn) 1379 if ((unsigned)backlog > somaxconn)
1380 backlog = somaxconn; 1380 backlog = somaxconn;
1381 1381
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 6dac38792288..5828e5c060ca 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -625,7 +625,7 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
625 gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor); 625 gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
626 if (!gss_auth->mech) { 626 if (!gss_auth->mech) {
627 printk(KERN_WARNING "%s: Pseudoflavor %d not found!\n", 627 printk(KERN_WARNING "%s: Pseudoflavor %d not found!\n",
628 __FUNCTION__, flavor); 628 __func__, flavor);
629 goto err_free; 629 goto err_free;
630 } 630 }
631 gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor); 631 gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 8834d68972cb..7b96ff38002f 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -43,7 +43,7 @@
43 43
44#define dprint_status(t) \ 44#define dprint_status(t) \
45 dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \ 45 dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \
46 __FUNCTION__, t->tk_status) 46 __func__, t->tk_status)
47 47
48/* 48/*
49 * All RPC clients are linked into this list 49 * All RPC clients are linked into this list
@@ -368,7 +368,7 @@ out_no_path:
368out_no_stats: 368out_no_stats:
369 kfree(new); 369 kfree(new);
370out_no_clnt: 370out_no_clnt:
371 dprintk("RPC: %s: returned error %d\n", __FUNCTION__, err); 371 dprintk("RPC: %s: returned error %d\n", __func__, err);
372 return ERR_PTR(err); 372 return ERR_PTR(err);
373} 373}
374EXPORT_SYMBOL_GPL(rpc_clone_client); 374EXPORT_SYMBOL_GPL(rpc_clone_client);
@@ -752,7 +752,7 @@ call_reserveresult(struct rpc_task *task)
752 } 752 }
753 753
754 printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n", 754 printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
755 __FUNCTION__, status); 755 __func__, status);
756 rpc_exit(task, -EIO); 756 rpc_exit(task, -EIO);
757 return; 757 return;
758 } 758 }
@@ -763,7 +763,7 @@ call_reserveresult(struct rpc_task *task)
763 */ 763 */
764 if (task->tk_rqstp) { 764 if (task->tk_rqstp) {
765 printk(KERN_ERR "%s: status=%d, request allocated anyway\n", 765 printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
766 __FUNCTION__, status); 766 __func__, status);
767 xprt_release(task); 767 xprt_release(task);
768 } 768 }
769 769
@@ -775,7 +775,7 @@ call_reserveresult(struct rpc_task *task)
775 break; 775 break;
776 default: 776 default:
777 printk(KERN_ERR "%s: unrecognized error %d, exiting\n", 777 printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
778 __FUNCTION__, status); 778 __func__, status);
779 break; 779 break;
780 } 780 }
781 rpc_exit(task, status); 781 rpc_exit(task, status);
@@ -1323,7 +1323,7 @@ call_verify(struct rpc_task *task)
1323 * undefined results 1323 * undefined results
1324 */ 1324 */
1325 dprintk("RPC: %5u %s: XDR representation not a multiple of" 1325 dprintk("RPC: %5u %s: XDR representation not a multiple of"
1326 " 4 bytes: 0x%x\n", task->tk_pid, __FUNCTION__, 1326 " 4 bytes: 0x%x\n", task->tk_pid, __func__,
1327 task->tk_rqstp->rq_rcv_buf.len); 1327 task->tk_rqstp->rq_rcv_buf.len);
1328 goto out_eio; 1328 goto out_eio;
1329 } 1329 }
@@ -1333,7 +1333,7 @@ call_verify(struct rpc_task *task)
1333 1333
1334 if ((n = ntohl(*p++)) != RPC_REPLY) { 1334 if ((n = ntohl(*p++)) != RPC_REPLY) {
1335 dprintk("RPC: %5u %s: not an RPC reply: %x\n", 1335 dprintk("RPC: %5u %s: not an RPC reply: %x\n",
1336 task->tk_pid, __FUNCTION__, n); 1336 task->tk_pid, __func__, n);
1337 goto out_garbage; 1337 goto out_garbage;
1338 } 1338 }
1339 if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { 1339 if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
@@ -1345,13 +1345,13 @@ call_verify(struct rpc_task *task)
1345 case RPC_MISMATCH: 1345 case RPC_MISMATCH:
1346 dprintk("RPC: %5u %s: RPC call version " 1346 dprintk("RPC: %5u %s: RPC call version "
1347 "mismatch!\n", 1347 "mismatch!\n",
1348 task->tk_pid, __FUNCTION__); 1348 task->tk_pid, __func__);
1349 error = -EPROTONOSUPPORT; 1349 error = -EPROTONOSUPPORT;
1350 goto out_err; 1350 goto out_err;
1351 default: 1351 default:
1352 dprintk("RPC: %5u %s: RPC call rejected, " 1352 dprintk("RPC: %5u %s: RPC call rejected, "
1353 "unknown error: %x\n", 1353 "unknown error: %x\n",
1354 task->tk_pid, __FUNCTION__, n); 1354 task->tk_pid, __func__, n);
1355 goto out_eio; 1355 goto out_eio;
1356 } 1356 }
1357 if (--len < 0) 1357 if (--len < 0)
@@ -1365,7 +1365,7 @@ call_verify(struct rpc_task *task)
1365 break; 1365 break;
1366 task->tk_cred_retry--; 1366 task->tk_cred_retry--;
1367 dprintk("RPC: %5u %s: retry stale creds\n", 1367 dprintk("RPC: %5u %s: retry stale creds\n",
1368 task->tk_pid, __FUNCTION__); 1368 task->tk_pid, __func__);
1369 rpcauth_invalcred(task); 1369 rpcauth_invalcred(task);
1370 /* Ensure we obtain a new XID! */ 1370 /* Ensure we obtain a new XID! */
1371 xprt_release(task); 1371 xprt_release(task);
@@ -1378,7 +1378,7 @@ call_verify(struct rpc_task *task)
1378 break; 1378 break;
1379 task->tk_garb_retry--; 1379 task->tk_garb_retry--;
1380 dprintk("RPC: %5u %s: retry garbled creds\n", 1380 dprintk("RPC: %5u %s: retry garbled creds\n",
1381 task->tk_pid, __FUNCTION__); 1381 task->tk_pid, __func__);
1382 task->tk_action = call_bind; 1382 task->tk_action = call_bind;
1383 goto out_retry; 1383 goto out_retry;
1384 case RPC_AUTH_TOOWEAK: 1384 case RPC_AUTH_TOOWEAK:
@@ -1387,16 +1387,16 @@ call_verify(struct rpc_task *task)
1387 break; 1387 break;
1388 default: 1388 default:
1389 dprintk("RPC: %5u %s: unknown auth error: %x\n", 1389 dprintk("RPC: %5u %s: unknown auth error: %x\n",
1390 task->tk_pid, __FUNCTION__, n); 1390 task->tk_pid, __func__, n);
1391 error = -EIO; 1391 error = -EIO;
1392 } 1392 }
1393 dprintk("RPC: %5u %s: call rejected %d\n", 1393 dprintk("RPC: %5u %s: call rejected %d\n",
1394 task->tk_pid, __FUNCTION__, n); 1394 task->tk_pid, __func__, n);
1395 goto out_err; 1395 goto out_err;
1396 } 1396 }
1397 if (!(p = rpcauth_checkverf(task, p))) { 1397 if (!(p = rpcauth_checkverf(task, p))) {
1398 dprintk("RPC: %5u %s: auth check failed\n", 1398 dprintk("RPC: %5u %s: auth check failed\n",
1399 task->tk_pid, __FUNCTION__); 1399 task->tk_pid, __func__);
1400 goto out_garbage; /* bad verifier, retry */ 1400 goto out_garbage; /* bad verifier, retry */
1401 } 1401 }
1402 len = p - (__be32 *)iov->iov_base - 1; 1402 len = p - (__be32 *)iov->iov_base - 1;
@@ -1407,14 +1407,14 @@ call_verify(struct rpc_task *task)
1407 return p; 1407 return p;
1408 case RPC_PROG_UNAVAIL: 1408 case RPC_PROG_UNAVAIL:
1409 dprintk("RPC: %5u %s: program %u is unsupported by server %s\n", 1409 dprintk("RPC: %5u %s: program %u is unsupported by server %s\n",
1410 task->tk_pid, __FUNCTION__, 1410 task->tk_pid, __func__,
1411 (unsigned int)task->tk_client->cl_prog, 1411 (unsigned int)task->tk_client->cl_prog,
1412 task->tk_client->cl_server); 1412 task->tk_client->cl_server);
1413 error = -EPFNOSUPPORT; 1413 error = -EPFNOSUPPORT;
1414 goto out_err; 1414 goto out_err;
1415 case RPC_PROG_MISMATCH: 1415 case RPC_PROG_MISMATCH:
1416 dprintk("RPC: %5u %s: program %u, version %u unsupported by " 1416 dprintk("RPC: %5u %s: program %u, version %u unsupported by "
1417 "server %s\n", task->tk_pid, __FUNCTION__, 1417 "server %s\n", task->tk_pid, __func__,
1418 (unsigned int)task->tk_client->cl_prog, 1418 (unsigned int)task->tk_client->cl_prog,
1419 (unsigned int)task->tk_client->cl_vers, 1419 (unsigned int)task->tk_client->cl_vers,
1420 task->tk_client->cl_server); 1420 task->tk_client->cl_server);
@@ -1423,7 +1423,7 @@ call_verify(struct rpc_task *task)
1423 case RPC_PROC_UNAVAIL: 1423 case RPC_PROC_UNAVAIL:
1424 dprintk("RPC: %5u %s: proc %p unsupported by program %u, " 1424 dprintk("RPC: %5u %s: proc %p unsupported by program %u, "
1425 "version %u on server %s\n", 1425 "version %u on server %s\n",
1426 task->tk_pid, __FUNCTION__, 1426 task->tk_pid, __func__,
1427 task->tk_msg.rpc_proc, 1427 task->tk_msg.rpc_proc,
1428 task->tk_client->cl_prog, 1428 task->tk_client->cl_prog,
1429 task->tk_client->cl_vers, 1429 task->tk_client->cl_vers,
@@ -1432,11 +1432,11 @@ call_verify(struct rpc_task *task)
1432 goto out_err; 1432 goto out_err;
1433 case RPC_GARBAGE_ARGS: 1433 case RPC_GARBAGE_ARGS:
1434 dprintk("RPC: %5u %s: server saw garbage\n", 1434 dprintk("RPC: %5u %s: server saw garbage\n",
1435 task->tk_pid, __FUNCTION__); 1435 task->tk_pid, __func__);
1436 break; /* retry */ 1436 break; /* retry */
1437 default: 1437 default:
1438 dprintk("RPC: %5u %s: server accept status: %x\n", 1438 dprintk("RPC: %5u %s: server accept status: %x\n",
1439 task->tk_pid, __FUNCTION__, n); 1439 task->tk_pid, __func__, n);
1440 /* Also retry */ 1440 /* Also retry */
1441 } 1441 }
1442 1442
@@ -1445,7 +1445,7 @@ out_garbage:
1445 if (task->tk_garb_retry) { 1445 if (task->tk_garb_retry) {
1446 task->tk_garb_retry--; 1446 task->tk_garb_retry--;
1447 dprintk("RPC: %5u %s: retrying\n", 1447 dprintk("RPC: %5u %s: retrying\n",
1448 task->tk_pid, __FUNCTION__); 1448 task->tk_pid, __func__);
1449 task->tk_action = call_bind; 1449 task->tk_action = call_bind;
1450out_retry: 1450out_retry:
1451 return ERR_PTR(-EAGAIN); 1451 return ERR_PTR(-EAGAIN);
@@ -1455,11 +1455,11 @@ out_eio:
1455out_err: 1455out_err:
1456 rpc_exit(task, error); 1456 rpc_exit(task, error);
1457 dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid, 1457 dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
1458 __FUNCTION__, error); 1458 __func__, error);
1459 return ERR_PTR(error); 1459 return ERR_PTR(error);
1460out_overflow: 1460out_overflow:
1461 dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid, 1461 dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
1462 __FUNCTION__); 1462 __func__);
1463 goto out_garbage; 1463 goto out_garbage;
1464} 1464}
1465 1465
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 1b395a41a8b2..5a9b0e7828cd 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -479,13 +479,13 @@ rpc_lookup_parent(char *path, struct nameidata *nd)
479 mnt = rpc_get_mount(); 479 mnt = rpc_get_mount();
480 if (IS_ERR(mnt)) { 480 if (IS_ERR(mnt)) {
481 printk(KERN_WARNING "%s: %s failed to mount " 481 printk(KERN_WARNING "%s: %s failed to mount "
482 "pseudofilesystem \n", __FILE__, __FUNCTION__); 482 "pseudofilesystem \n", __FILE__, __func__);
483 return PTR_ERR(mnt); 483 return PTR_ERR(mnt);
484 } 484 }
485 485
486 if (vfs_path_lookup(mnt->mnt_root, mnt, path, LOOKUP_PARENT, nd)) { 486 if (vfs_path_lookup(mnt->mnt_root, mnt, path, LOOKUP_PARENT, nd)) {
487 printk(KERN_WARNING "%s: %s failed to find path %s\n", 487 printk(KERN_WARNING "%s: %s failed to find path %s\n",
488 __FILE__, __FUNCTION__, path); 488 __FILE__, __func__, path);
489 rpc_put_mount(); 489 rpc_put_mount();
490 return -ENOENT; 490 return -ENOENT;
491 } 491 }
@@ -604,7 +604,7 @@ rpc_populate(struct dentry *parent,
604out_bad: 604out_bad:
605 mutex_unlock(&dir->i_mutex); 605 mutex_unlock(&dir->i_mutex);
606 printk(KERN_WARNING "%s: %s failed to populate directory %s\n", 606 printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
607 __FILE__, __FUNCTION__, parent->d_name.name); 607 __FILE__, __func__, parent->d_name.name);
608 return -ENOMEM; 608 return -ENOMEM;
609} 609}
610 610
@@ -623,7 +623,7 @@ __rpc_mkdir(struct inode *dir, struct dentry *dentry)
623 return 0; 623 return 0;
624out_err: 624out_err:
625 printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n", 625 printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
626 __FILE__, __FUNCTION__, dentry->d_name.name); 626 __FILE__, __func__, dentry->d_name.name);
627 return -ENOMEM; 627 return -ENOMEM;
628} 628}
629 629
@@ -715,7 +715,7 @@ err_depopulate:
715err_dput: 715err_dput:
716 dput(dentry); 716 dput(dentry);
717 printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n", 717 printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n",
718 __FILE__, __FUNCTION__, path, error); 718 __FILE__, __func__, path, error);
719 dentry = ERR_PTR(error); 719 dentry = ERR_PTR(error);
720 goto out; 720 goto out;
721} 721}
@@ -804,7 +804,7 @@ err_dput:
804 dput(dentry); 804 dput(dentry);
805 dentry = ERR_PTR(-ENOMEM); 805 dentry = ERR_PTR(-ENOMEM);
806 printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n", 806 printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
807 __FILE__, __FUNCTION__, parent->d_name.name, name, 807 __FILE__, __func__, parent->d_name.name, name,
808 -ENOMEM); 808 -ENOMEM);
809 goto out; 809 goto out;
810} 810}
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 3164a0871cf0..56aa018dce3a 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -224,7 +224,7 @@ int rpcb_getport_sync(struct sockaddr_in *sin, u32 prog, u32 vers, int prot)
224 int status; 224 int status;
225 225
226 dprintk("RPC: %s(" NIPQUAD_FMT ", %u, %u, %d)\n", 226 dprintk("RPC: %s(" NIPQUAD_FMT ", %u, %u, %d)\n",
227 __FUNCTION__, NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); 227 __func__, NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot);
228 228
229 rpcb_clnt = rpcb_create(NULL, (struct sockaddr *)sin, 229 rpcb_clnt = rpcb_create(NULL, (struct sockaddr *)sin,
230 sizeof(*sin), prot, 2, 0); 230 sizeof(*sin), prot, 2, 0);
@@ -283,7 +283,7 @@ void rpcb_getport_async(struct rpc_task *task)
283 struct rpcb_info *info; 283 struct rpcb_info *info;
284 284
285 dprintk("RPC: %5u %s(%s, %u, %u, %d)\n", 285 dprintk("RPC: %5u %s(%s, %u, %u, %d)\n",
286 task->tk_pid, __FUNCTION__, 286 task->tk_pid, __func__,
287 clnt->cl_server, clnt->cl_prog, clnt->cl_vers, xprt->prot); 287 clnt->cl_server, clnt->cl_prog, clnt->cl_vers, xprt->prot);
288 288
289 /* Autobind on cloned rpc clients is discouraged */ 289 /* Autobind on cloned rpc clients is discouraged */
@@ -292,7 +292,7 @@ void rpcb_getport_async(struct rpc_task *task)
292 if (xprt_test_and_set_binding(xprt)) { 292 if (xprt_test_and_set_binding(xprt)) {
293 status = -EAGAIN; /* tell caller to check again */ 293 status = -EAGAIN; /* tell caller to check again */
294 dprintk("RPC: %5u %s: waiting for another binder\n", 294 dprintk("RPC: %5u %s: waiting for another binder\n",
295 task->tk_pid, __FUNCTION__); 295 task->tk_pid, __func__);
296 goto bailout_nowake; 296 goto bailout_nowake;
297 } 297 }
298 298
@@ -304,7 +304,7 @@ void rpcb_getport_async(struct rpc_task *task)
304 if (xprt_bound(xprt)) { 304 if (xprt_bound(xprt)) {
305 status = 0; 305 status = 0;
306 dprintk("RPC: %5u %s: already bound\n", 306 dprintk("RPC: %5u %s: already bound\n",
307 task->tk_pid, __FUNCTION__); 307 task->tk_pid, __func__);
308 goto bailout_nofree; 308 goto bailout_nofree;
309 } 309 }
310 310
@@ -321,27 +321,27 @@ void rpcb_getport_async(struct rpc_task *task)
321 default: 321 default:
322 status = -EAFNOSUPPORT; 322 status = -EAFNOSUPPORT;
323 dprintk("RPC: %5u %s: bad address family\n", 323 dprintk("RPC: %5u %s: bad address family\n",
324 task->tk_pid, __FUNCTION__); 324 task->tk_pid, __func__);
325 goto bailout_nofree; 325 goto bailout_nofree;
326 } 326 }
327 if (info[xprt->bind_index].rpc_proc == NULL) { 327 if (info[xprt->bind_index].rpc_proc == NULL) {
328 xprt->bind_index = 0; 328 xprt->bind_index = 0;
329 status = -EPFNOSUPPORT; 329 status = -EPFNOSUPPORT;
330 dprintk("RPC: %5u %s: no more getport versions available\n", 330 dprintk("RPC: %5u %s: no more getport versions available\n",
331 task->tk_pid, __FUNCTION__); 331 task->tk_pid, __func__);
332 goto bailout_nofree; 332 goto bailout_nofree;
333 } 333 }
334 bind_version = info[xprt->bind_index].rpc_vers; 334 bind_version = info[xprt->bind_index].rpc_vers;
335 335
336 dprintk("RPC: %5u %s: trying rpcbind version %u\n", 336 dprintk("RPC: %5u %s: trying rpcbind version %u\n",
337 task->tk_pid, __FUNCTION__, bind_version); 337 task->tk_pid, __func__, bind_version);
338 338
339 rpcb_clnt = rpcb_create(clnt->cl_server, sap, salen, xprt->prot, 339 rpcb_clnt = rpcb_create(clnt->cl_server, sap, salen, xprt->prot,
340 bind_version, 0); 340 bind_version, 0);
341 if (IS_ERR(rpcb_clnt)) { 341 if (IS_ERR(rpcb_clnt)) {
342 status = PTR_ERR(rpcb_clnt); 342 status = PTR_ERR(rpcb_clnt);
343 dprintk("RPC: %5u %s: rpcb_create failed, error %ld\n", 343 dprintk("RPC: %5u %s: rpcb_create failed, error %ld\n",
344 task->tk_pid, __FUNCTION__, PTR_ERR(rpcb_clnt)); 344 task->tk_pid, __func__, PTR_ERR(rpcb_clnt));
345 goto bailout_nofree; 345 goto bailout_nofree;
346 } 346 }
347 347
@@ -349,7 +349,7 @@ void rpcb_getport_async(struct rpc_task *task)
349 if (!map) { 349 if (!map) {
350 status = -ENOMEM; 350 status = -ENOMEM;
351 dprintk("RPC: %5u %s: no memory available\n", 351 dprintk("RPC: %5u %s: no memory available\n",
352 task->tk_pid, __FUNCTION__); 352 task->tk_pid, __func__);
353 goto bailout_nofree; 353 goto bailout_nofree;
354 } 354 }
355 map->r_prog = clnt->cl_prog; 355 map->r_prog = clnt->cl_prog;
@@ -366,7 +366,7 @@ void rpcb_getport_async(struct rpc_task *task)
366 if (IS_ERR(child)) { 366 if (IS_ERR(child)) {
367 status = -EIO; 367 status = -EIO;
368 dprintk("RPC: %5u %s: rpc_run_task failed\n", 368 dprintk("RPC: %5u %s: rpc_run_task failed\n",
369 task->tk_pid, __FUNCTION__); 369 task->tk_pid, __func__);
370 goto bailout; 370 goto bailout;
371 } 371 }
372 rpc_put_task(child); 372 rpc_put_task(child);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 30e7ac243a90..613daf8c1ff7 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1359,7 +1359,7 @@ static int xs_bind4(struct sock_xprt *transport, struct socket *sock)
1359 nloop++; 1359 nloop++;
1360 } while (err == -EADDRINUSE && nloop != 2); 1360 } while (err == -EADDRINUSE && nloop != 2);
1361 dprintk("RPC: %s "NIPQUAD_FMT":%u: %s (%d)\n", 1361 dprintk("RPC: %s "NIPQUAD_FMT":%u: %s (%d)\n",
1362 __FUNCTION__, NIPQUAD(myaddr.sin_addr), 1362 __func__, NIPQUAD(myaddr.sin_addr),
1363 port, err ? "failed" : "ok", err); 1363 port, err ? "failed" : "ok", err);
1364 return err; 1364 return err;
1365} 1365}
diff --git a/net/tipc/core.c b/net/tipc/core.c
index d2d7d32c02c7..740aac5cdfb6 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -48,16 +48,8 @@
48#include "subscr.h" 48#include "subscr.h"
49#include "config.h" 49#include "config.h"
50 50
51int tipc_eth_media_start(void);
52void tipc_eth_media_stop(void);
53int tipc_handler_start(void);
54void tipc_handler_stop(void);
55int tipc_socket_init(void);
56void tipc_socket_stop(void);
57int tipc_netlink_start(void);
58void tipc_netlink_stop(void);
59 51
60#define TIPC_MOD_VER "1.6.2" 52#define TIPC_MOD_VER "1.6.3"
61 53
62#ifndef CONFIG_TIPC_ZONES 54#ifndef CONFIG_TIPC_ZONES
63#define CONFIG_TIPC_ZONES 3 55#define CONFIG_TIPC_ZONES 3
@@ -277,7 +269,6 @@ EXPORT_SYMBOL(tipc_register_media);
277/* TIPC API for external APIs (see tipc_port.h) */ 269/* TIPC API for external APIs (see tipc_port.h) */
278 270
279EXPORT_SYMBOL(tipc_createport_raw); 271EXPORT_SYMBOL(tipc_createport_raw);
280EXPORT_SYMBOL(tipc_set_msg_option);
281EXPORT_SYMBOL(tipc_reject_msg); 272EXPORT_SYMBOL(tipc_reject_msg);
282EXPORT_SYMBOL(tipc_send_buf_fast); 273EXPORT_SYMBOL(tipc_send_buf_fast);
283EXPORT_SYMBOL(tipc_acknowledge); 274EXPORT_SYMBOL(tipc_acknowledge);
diff --git a/net/tipc/core.h b/net/tipc/core.h
index feabca580820..325404fd4eb5 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -180,6 +180,12 @@ extern int tipc_core_start(void);
180extern void tipc_core_stop(void); 180extern void tipc_core_stop(void);
181extern int tipc_core_start_net(void); 181extern int tipc_core_start_net(void);
182extern void tipc_core_stop_net(void); 182extern void tipc_core_stop_net(void);
183extern int tipc_handler_start(void);
184extern void tipc_handler_stop(void);
185extern int tipc_netlink_start(void);
186extern void tipc_netlink_stop(void);
187extern int tipc_socket_init(void);
188extern void tipc_socket_stop(void);
183 189
184static inline int delimit(int val, int min, int max) 190static inline int delimit(int val, int min, int max)
185{ 191{
@@ -310,7 +316,7 @@ static inline struct sk_buff *buf_acquire(u32 size)
310 struct sk_buff *skb; 316 struct sk_buff *skb;
311 unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u; 317 unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
312 318
313 skb = alloc_skb(buf_size, GFP_ATOMIC); 319 skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
314 if (skb) { 320 if (skb) {
315 skb_reserve(skb, BUF_HEADROOM); 321 skb_reserve(skb, BUF_HEADROOM);
316 skb_put(skb, size); 322 skb_put(skb, size);
@@ -328,8 +334,19 @@ static inline struct sk_buff *buf_acquire(u32 size)
328 334
329static inline void buf_discard(struct sk_buff *skb) 335static inline void buf_discard(struct sk_buff *skb)
330{ 336{
331 if (likely(skb != NULL)) 337 kfree_skb(skb);
332 kfree_skb(skb); 338}
339
340/**
341 * buf_linearize - convert a TIPC message buffer into a single contiguous piece
342 * @skb: message buffer
343 *
344 * Returns 0 on success.
345 */
346
347static inline int buf_linearize(struct sk_buff *skb)
348{
349 return skb_linearize(skb);
333} 350}
334 351
335#endif 352#endif
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 3bbef2ab22ae..9cd35eec3e7f 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -101,7 +101,7 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
101 struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv; 101 struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv;
102 u32 size; 102 u32 size;
103 103
104 if (dev->nd_net != &init_net) { 104 if (dev_net(dev) != &init_net) {
105 kfree_skb(buf); 105 kfree_skb(buf);
106 return 0; 106 return 0;
107 } 107 }
@@ -198,7 +198,7 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
198 struct eth_bearer *eb_ptr = &eth_bearers[0]; 198 struct eth_bearer *eb_ptr = &eth_bearers[0];
199 struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS]; 199 struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
200 200
201 if (dev->nd_net != &init_net) 201 if (dev_net(dev) != &init_net)
202 return NOTIFY_DONE; 202 return NOTIFY_DONE;
203 203
204 while ((eb_ptr->dev != dev)) { 204 while ((eb_ptr->dev != dev)) {
diff --git a/net/tipc/link.c b/net/tipc/link.c
index cefa99824c58..2a26a16e269f 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1785,6 +1785,56 @@ static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
1785 return buf; 1785 return buf;
1786} 1786}
1787 1787
1788/**
1789 * link_recv_buf_validate - validate basic format of received message
1790 *
1791 * This routine ensures a TIPC message has an acceptable header, and at least
1792 * as much data as the header indicates it should. The routine also ensures
1793 * that the entire message header is stored in the main fragment of the message
1794 * buffer, to simplify future access to message header fields.
1795 *
1796 * Note: Having extra info present in the message header or data areas is OK.
1797 * TIPC will ignore the excess, under the assumption that it is optional info
1798 * introduced by a later release of the protocol.
1799 */
1800
1801static int link_recv_buf_validate(struct sk_buff *buf)
1802{
1803 static u32 min_data_hdr_size[8] = {
1804 SHORT_H_SIZE, MCAST_H_SIZE, LONG_H_SIZE, DIR_MSG_H_SIZE,
1805 MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
1806 };
1807
1808 struct tipc_msg *msg;
1809 u32 tipc_hdr[2];
1810 u32 size;
1811 u32 hdr_size;
1812 u32 min_hdr_size;
1813
1814 if (unlikely(buf->len < MIN_H_SIZE))
1815 return 0;
1816
1817 msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
1818 if (msg == NULL)
1819 return 0;
1820
1821 if (unlikely(msg_version(msg) != TIPC_VERSION))
1822 return 0;
1823
1824 size = msg_size(msg);
1825 hdr_size = msg_hdr_sz(msg);
1826 min_hdr_size = msg_isdata(msg) ?
1827 min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
1828
1829 if (unlikely((hdr_size < min_hdr_size) ||
1830 (size < hdr_size) ||
1831 (buf->len < size) ||
1832 (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
1833 return 0;
1834
1835 return pskb_may_pull(buf, hdr_size);
1836}
1837
1788void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr) 1838void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1789{ 1839{
1790 read_lock_bh(&tipc_net_lock); 1840 read_lock_bh(&tipc_net_lock);
@@ -1794,9 +1844,9 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1794 struct link *l_ptr; 1844 struct link *l_ptr;
1795 struct sk_buff *crs; 1845 struct sk_buff *crs;
1796 struct sk_buff *buf = head; 1846 struct sk_buff *buf = head;
1797 struct tipc_msg *msg = buf_msg(buf); 1847 struct tipc_msg *msg;
1798 u32 seq_no = msg_seqno(msg); 1848 u32 seq_no;
1799 u32 ackd = msg_ack(msg); 1849 u32 ackd;
1800 u32 released = 0; 1850 u32 released = 0;
1801 int type; 1851 int type;
1802 1852
@@ -1804,12 +1854,21 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1804 TIPC_SKB_CB(buf)->handle = b_ptr; 1854 TIPC_SKB_CB(buf)->handle = b_ptr;
1805 1855
1806 head = head->next; 1856 head = head->next;
1807 if (unlikely(msg_version(msg) != TIPC_VERSION)) 1857
1858 /* Ensure message is well-formed */
1859
1860 if (unlikely(!link_recv_buf_validate(buf)))
1808 goto cont; 1861 goto cont;
1809#if 0 1862
1810 if (msg_user(msg) != LINK_PROTOCOL) 1863 /* Ensure message data is a single contiguous unit */
1811#endif 1864
1812 msg_dbg(msg,"<REC<"); 1865 if (unlikely(buf_linearize(buf))) {
1866 goto cont;
1867 }
1868
1869 /* Handle arrival of a non-unicast link message */
1870
1871 msg = buf_msg(buf);
1813 1872
1814 if (unlikely(msg_non_seq(msg))) { 1873 if (unlikely(msg_non_seq(msg))) {
1815 link_recv_non_seq(buf); 1874 link_recv_non_seq(buf);
@@ -1820,19 +1879,26 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1820 (msg_destnode(msg) != tipc_own_addr))) 1879 (msg_destnode(msg) != tipc_own_addr)))
1821 goto cont; 1880 goto cont;
1822 1881
1882 /* Locate unicast link endpoint that should handle message */
1883
1823 n_ptr = tipc_node_find(msg_prevnode(msg)); 1884 n_ptr = tipc_node_find(msg_prevnode(msg));
1824 if (unlikely(!n_ptr)) 1885 if (unlikely(!n_ptr))
1825 goto cont; 1886 goto cont;
1826
1827 tipc_node_lock(n_ptr); 1887 tipc_node_lock(n_ptr);
1888
1828 l_ptr = n_ptr->links[b_ptr->identity]; 1889 l_ptr = n_ptr->links[b_ptr->identity];
1829 if (unlikely(!l_ptr)) { 1890 if (unlikely(!l_ptr)) {
1830 tipc_node_unlock(n_ptr); 1891 tipc_node_unlock(n_ptr);
1831 goto cont; 1892 goto cont;
1832 } 1893 }
1833 /* 1894
1834 * Release acked messages 1895 /* Validate message sequence number info */
1835 */ 1896
1897 seq_no = msg_seqno(msg);
1898 ackd = msg_ack(msg);
1899
1900 /* Release acked messages */
1901
1836 if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) { 1902 if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
1837 if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported) 1903 if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
1838 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); 1904 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
@@ -1851,6 +1917,9 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1851 l_ptr->first_out = crs; 1917 l_ptr->first_out = crs;
1852 l_ptr->out_queue_size -= released; 1918 l_ptr->out_queue_size -= released;
1853 } 1919 }
1920
1921 /* Try sending any messages link endpoint has pending */
1922
1854 if (unlikely(l_ptr->next_out)) 1923 if (unlikely(l_ptr->next_out))
1855 tipc_link_push_queue(l_ptr); 1924 tipc_link_push_queue(l_ptr);
1856 if (unlikely(!list_empty(&l_ptr->waiting_ports))) 1925 if (unlikely(!list_empty(&l_ptr->waiting_ports)))
@@ -1860,6 +1929,8 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1860 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 1929 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1861 } 1930 }
1862 1931
1932 /* Now (finally!) process the incoming message */
1933
1863protocol_check: 1934protocol_check:
1864 if (likely(link_working_working(l_ptr))) { 1935 if (likely(link_working_working(l_ptr))) {
1865 if (likely(seq_no == mod(l_ptr->next_in_no))) { 1936 if (likely(seq_no == mod(l_ptr->next_in_no))) {
@@ -2832,15 +2903,15 @@ static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
2832void tipc_link_set_queue_limits(struct link *l_ptr, u32 window) 2903void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
2833{ 2904{
2834 /* Data messages from this node, inclusive FIRST_FRAGM */ 2905 /* Data messages from this node, inclusive FIRST_FRAGM */
2835 l_ptr->queue_limit[DATA_LOW] = window; 2906 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
2836 l_ptr->queue_limit[DATA_MEDIUM] = (window / 3) * 4; 2907 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
2837 l_ptr->queue_limit[DATA_HIGH] = (window / 3) * 5; 2908 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
2838 l_ptr->queue_limit[DATA_CRITICAL] = (window / 3) * 6; 2909 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
2839 /* Transiting data messages,inclusive FIRST_FRAGM */ 2910 /* Transiting data messages,inclusive FIRST_FRAGM */
2840 l_ptr->queue_limit[DATA_LOW + 4] = 300; 2911 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
2841 l_ptr->queue_limit[DATA_MEDIUM + 4] = 600; 2912 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
2842 l_ptr->queue_limit[DATA_HIGH + 4] = 900; 2913 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
2843 l_ptr->queue_limit[DATA_CRITICAL + 4] = 1200; 2914 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
2844 l_ptr->queue_limit[CONN_MANAGER] = 1200; 2915 l_ptr->queue_limit[CONN_MANAGER] = 1200;
2845 l_ptr->queue_limit[ROUTE_DISTRIBUTOR] = 1200; 2916 l_ptr->queue_limit[ROUTE_DISTRIBUTOR] = 1200;
2846 l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500; 2917 l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 782485468fb2..696a8633df75 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -73,10 +73,10 @@ void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str
73 tipc_printf(buf, "NO(%u/%u):",msg_long_msgno(msg), 73 tipc_printf(buf, "NO(%u/%u):",msg_long_msgno(msg),
74 msg_fragm_no(msg)); 74 msg_fragm_no(msg));
75 break; 75 break;
76 case DATA_LOW: 76 case TIPC_LOW_IMPORTANCE:
77 case DATA_MEDIUM: 77 case TIPC_MEDIUM_IMPORTANCE:
78 case DATA_HIGH: 78 case TIPC_HIGH_IMPORTANCE:
79 case DATA_CRITICAL: 79 case TIPC_CRITICAL_IMPORTANCE:
80 tipc_printf(buf, "DAT%u:", msg_user(msg)); 80 tipc_printf(buf, "DAT%u:", msg_user(msg));
81 if (msg_short(msg)) { 81 if (msg_short(msg)) {
82 tipc_printf(buf, "CON:"); 82 tipc_printf(buf, "CON:");
@@ -229,10 +229,10 @@ void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str
229 switch (usr) { 229 switch (usr) {
230 case CONN_MANAGER: 230 case CONN_MANAGER:
231 case NAME_DISTRIBUTOR: 231 case NAME_DISTRIBUTOR:
232 case DATA_LOW: 232 case TIPC_LOW_IMPORTANCE:
233 case DATA_MEDIUM: 233 case TIPC_MEDIUM_IMPORTANCE:
234 case DATA_HIGH: 234 case TIPC_HIGH_IMPORTANCE:
235 case DATA_CRITICAL: 235 case TIPC_CRITICAL_IMPORTANCE:
236 if (msg_short(msg)) 236 if (msg_short(msg))
237 break; /* No error */ 237 break; /* No error */
238 switch (msg_errcode(msg)) { 238 switch (msg_errcode(msg)) {
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index e9ef6df26562..6ad070d87702 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -40,18 +40,16 @@
40#include "core.h" 40#include "core.h"
41 41
42#define TIPC_VERSION 2 42#define TIPC_VERSION 2
43#define DATA_LOW TIPC_LOW_IMPORTANCE 43
44#define DATA_MEDIUM TIPC_MEDIUM_IMPORTANCE 44#define SHORT_H_SIZE 24 /* Connected, in-cluster messages */
45#define DATA_HIGH TIPC_HIGH_IMPORTANCE
46#define DATA_CRITICAL TIPC_CRITICAL_IMPORTANCE
47#define SHORT_H_SIZE 24 /* Connected,in cluster */
48#define DIR_MSG_H_SIZE 32 /* Directly addressed messages */ 45#define DIR_MSG_H_SIZE 32 /* Directly addressed messages */
49#define CONN_MSG_H_SIZE 36 /* Routed connected msgs*/ 46#define LONG_H_SIZE 40 /* Named messages */
50#define LONG_H_SIZE 40 /* Named Messages */
51#define MCAST_H_SIZE 44 /* Multicast messages */ 47#define MCAST_H_SIZE 44 /* Multicast messages */
52#define MAX_H_SIZE 60 /* Inclusive full options */ 48#define INT_H_SIZE 40 /* Internal messages */
49#define MIN_H_SIZE 24 /* Smallest legal TIPC header size */
50#define MAX_H_SIZE 60 /* Largest possible TIPC header size */
51
53#define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE) 52#define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
54#define LINK_CONFIG 13
55 53
56 54
57/* 55/*
@@ -72,8 +70,10 @@ static inline void msg_set_bits(struct tipc_msg *m, u32 w,
72 u32 pos, u32 mask, u32 val) 70 u32 pos, u32 mask, u32 val)
73{ 71{
74 val = (val & mask) << pos; 72 val = (val & mask) << pos;
75 m->hdr[w] &= ~htonl(mask << pos); 73 val = htonl(val);
76 m->hdr[w] |= htonl(val); 74 mask = htonl(mask << pos);
75 m->hdr[w] &= ~mask;
76 m->hdr[w] |= val;
77} 77}
78 78
79/* 79/*
@@ -87,7 +87,7 @@ static inline u32 msg_version(struct tipc_msg *m)
87 87
88static inline void msg_set_version(struct tipc_msg *m) 88static inline void msg_set_version(struct tipc_msg *m)
89{ 89{
90 msg_set_bits(m, 0, 29, 0xf, TIPC_VERSION); 90 msg_set_bits(m, 0, 29, 7, TIPC_VERSION);
91} 91}
92 92
93static inline u32 msg_user(struct tipc_msg *m) 93static inline u32 msg_user(struct tipc_msg *m)
@@ -97,7 +97,7 @@ static inline u32 msg_user(struct tipc_msg *m)
97 97
98static inline u32 msg_isdata(struct tipc_msg *m) 98static inline u32 msg_isdata(struct tipc_msg *m)
99{ 99{
100 return (msg_user(m) <= DATA_CRITICAL); 100 return (msg_user(m) <= TIPC_CRITICAL_IMPORTANCE);
101} 101}
102 102
103static inline void msg_set_user(struct tipc_msg *m, u32 n) 103static inline void msg_set_user(struct tipc_msg *m, u32 n)
@@ -190,18 +190,6 @@ static inline void msg_set_lookup_scope(struct tipc_msg *m, u32 n)
190 msg_set_bits(m, 1, 19, 0x3, n); 190 msg_set_bits(m, 1, 19, 0x3, n);
191} 191}
192 192
193static inline void msg_set_options(struct tipc_msg *m, const char *opt, u32 sz)
194{
195 u32 hsz = msg_hdr_sz(m);
196 char *to = (char *)&m->hdr[hsz/4];
197
198 if ((hsz < DIR_MSG_H_SIZE) || ((hsz + sz) > MAX_H_SIZE))
199 return;
200 msg_set_bits(m, 1, 16, 0x7, (hsz - 28)/4);
201 msg_set_hdr_sz(m, hsz + sz);
202 memcpy(to, opt, sz);
203}
204
205static inline u32 msg_bcast_ack(struct tipc_msg *m) 193static inline u32 msg_bcast_ack(struct tipc_msg *m)
206{ 194{
207 return msg_bits(m, 1, 0, 0xffff); 195 return msg_bits(m, 1, 0, 0xffff);
@@ -330,17 +318,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
330 return (struct tipc_msg *)msg_data(m); 318 return (struct tipc_msg *)msg_data(m);
331} 319}
332 320
333static inline void msg_expand(struct tipc_msg *m, u32 destnode)
334{
335 if (!msg_short(m))
336 return;
337 msg_set_hdr_sz(m, LONG_H_SIZE);
338 msg_set_orignode(m, msg_prevnode(m));
339 msg_set_destnode(m, destnode);
340 memset(&m->hdr[8], 0, 12);
341}
342
343
344 321
345/* 322/*
346 TIPC internal message header format, version 2 323 TIPC internal message header format, version 2
@@ -388,7 +365,6 @@ static inline void msg_expand(struct tipc_msg *m, u32 destnode)
388#define NAME_DISTRIBUTOR 11 365#define NAME_DISTRIBUTOR 11
389#define MSG_FRAGMENTER 12 366#define MSG_FRAGMENTER 12
390#define LINK_CONFIG 13 367#define LINK_CONFIG 13
391#define INT_H_SIZE 40
392#define DSC_H_SIZE 40 368#define DSC_H_SIZE 40
393 369
394/* 370/*
diff --git a/net/tipc/port.c b/net/tipc/port.c
index f508614ca59b..2f5806410c64 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -242,7 +242,8 @@ u32 tipc_createport_raw(void *usr_handle,
242 p_ptr->publ.max_pkt = MAX_PKT_DEFAULT; 242 p_ptr->publ.max_pkt = MAX_PKT_DEFAULT;
243 p_ptr->publ.ref = ref; 243 p_ptr->publ.ref = ref;
244 msg = &p_ptr->publ.phdr; 244 msg = &p_ptr->publ.phdr;
245 msg_init(msg, DATA_LOW, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, 0); 245 msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE,
246 0);
246 msg_set_orignode(msg, tipc_own_addr); 247 msg_set_orignode(msg, tipc_own_addr);
247 msg_set_prevnode(msg, tipc_own_addr); 248 msg_set_prevnode(msg, tipc_own_addr);
248 msg_set_origport(msg, ref); 249 msg_set_origport(msg, ref);
@@ -413,13 +414,6 @@ static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
413 return buf; 414 return buf;
414} 415}
415 416
416int tipc_set_msg_option(struct tipc_port *tp_ptr, const char *opt, const u32 sz)
417{
418 msg_expand(&tp_ptr->phdr, msg_destnode(&tp_ptr->phdr));
419 msg_set_options(&tp_ptr->phdr, opt, sz);
420 return TIPC_OK;
421}
422
423int tipc_reject_msg(struct sk_buff *buf, u32 err) 417int tipc_reject_msg(struct sk_buff *buf, u32 err)
424{ 418{
425 struct tipc_msg *msg = buf_msg(buf); 419 struct tipc_msg *msg = buf_msg(buf);
@@ -632,7 +626,7 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
632 msg_orignode(msg), 626 msg_orignode(msg),
633 msg_destport(msg), 627 msg_destport(msg),
634 tipc_own_addr, 628 tipc_own_addr,
635 DATA_HIGH, 629 TIPC_HIGH_IMPORTANCE,
636 TIPC_CONN_MSG, 630 TIPC_CONN_MSG,
637 err, 631 err,
638 0, 632 0,
@@ -1246,6 +1240,28 @@ exit:
1246 return res; 1240 return res;
1247} 1241}
1248 1242
1243/**
1244 * tipc_disconnect_port - disconnect port from peer
1245 *
1246 * Port must be locked.
1247 */
1248
1249int tipc_disconnect_port(struct tipc_port *tp_ptr)
1250{
1251 int res;
1252
1253 if (tp_ptr->connected) {
1254 tp_ptr->connected = 0;
1255 /* let timer expire on it's own to avoid deadlock! */
1256 tipc_nodesub_unsubscribe(
1257 &((struct port *)tp_ptr)->subscription);
1258 res = TIPC_OK;
1259 } else {
1260 res = -ENOTCONN;
1261 }
1262 return res;
1263}
1264
1249/* 1265/*
1250 * tipc_disconnect(): Disconnect port form peer. 1266 * tipc_disconnect(): Disconnect port form peer.
1251 * This is a node local operation. 1267 * This is a node local operation.
@@ -1254,17 +1270,12 @@ exit:
1254int tipc_disconnect(u32 ref) 1270int tipc_disconnect(u32 ref)
1255{ 1271{
1256 struct port *p_ptr; 1272 struct port *p_ptr;
1257 int res = -ENOTCONN; 1273 int res;
1258 1274
1259 p_ptr = tipc_port_lock(ref); 1275 p_ptr = tipc_port_lock(ref);
1260 if (!p_ptr) 1276 if (!p_ptr)
1261 return -EINVAL; 1277 return -EINVAL;
1262 if (p_ptr->publ.connected) { 1278 res = tipc_disconnect_port((struct tipc_port *)p_ptr);
1263 p_ptr->publ.connected = 0;
1264 /* let timer expire on it's own to avoid deadlock! */
1265 tipc_nodesub_unsubscribe(&p_ptr->subscription);
1266 res = TIPC_OK;
1267 }
1268 tipc_port_unlock(p_ptr); 1279 tipc_port_unlock(p_ptr);
1269 return res; 1280 return res;
1270} 1281}
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index c38744c96ed1..89cbab24d08f 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -2,7 +2,7 @@
2 * net/tipc/ref.c: TIPC object registry code 2 * net/tipc/ref.c: TIPC object registry code
3 * 3 *
4 * Copyright (c) 1991-2006, Ericsson AB 4 * Copyright (c) 1991-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems 5 * Copyright (c) 2004-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -36,32 +36,60 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "ref.h" 38#include "ref.h"
39#include "port.h" 39
40#include "subscr.h" 40/**
41#include "name_distr.h" 41 * struct reference - TIPC object reference entry
42#include "name_table.h" 42 * @object: pointer to object associated with reference entry
43#include "config.h" 43 * @lock: spinlock controlling access to object
44#include "discover.h" 44 * @ref: reference value for object (combines instance & array index info)
45#include "bearer.h" 45 */
46#include "node.h" 46
47#include "bcast.h" 47struct reference {
48 void *object;
49 spinlock_t lock;
50 u32 ref;
51};
52
53/**
54 * struct tipc_ref_table - table of TIPC object reference entries
55 * @entries: pointer to array of reference entries
56 * @capacity: array index of first unusable entry
57 * @init_point: array index of first uninitialized entry
58 * @first_free: array index of first unused object reference entry
59 * @last_free: array index of last unused object reference entry
60 * @index_mask: bitmask for array index portion of reference values
61 * @start_mask: initial value for instance value portion of reference values
62 */
63
64struct ref_table {
65 struct reference *entries;
66 u32 capacity;
67 u32 init_point;
68 u32 first_free;
69 u32 last_free;
70 u32 index_mask;
71 u32 start_mask;
72};
48 73
49/* 74/*
50 * Object reference table consists of 2**N entries. 75 * Object reference table consists of 2**N entries.
51 * 76 *
52 * A used entry has object ptr != 0, reference == XXXX|own index 77 * State Object ptr Reference
53 * (XXXX changes each time entry is acquired) 78 * ----- ---------- ---------
54 * A free entry has object ptr == 0, reference == YYYY|next free index 79 * In use non-NULL XXXX|own index
55 * (YYYY is one more than last used XXXX) 80 * (XXXX changes each time entry is acquired)
81 * Free NULL YYYY|next free index
82 * (YYYY is one more than last used XXXX)
83 * Uninitialized NULL 0
56 * 84 *
57 * Free list is initially chained from entry (2**N)-1 to entry 1. 85 * Entry 0 is not used; this allows index 0 to denote the end of the free list.
58 * Entry 0 is not used to allow index 0 to indicate the end of the free list.
59 * 86 *
60 * Note: Any accidental reference of the form XXXX|0--0 won't match entry 0 87 * Note that a reference value of 0 does not necessarily indicate that an
61 * because entry 0's reference field has the form XXXX|1--1. 88 * entry is uninitialized, since the last entry in the free list could also
89 * have a reference value of 0 (although this is unlikely).
62 */ 90 */
63 91
64struct ref_table tipc_ref_table = { NULL }; 92static struct ref_table tipc_ref_table = { NULL };
65 93
66static DEFINE_RWLOCK(ref_table_lock); 94static DEFINE_RWLOCK(ref_table_lock);
67 95
@@ -72,29 +100,29 @@ static DEFINE_RWLOCK(ref_table_lock);
72int tipc_ref_table_init(u32 requested_size, u32 start) 100int tipc_ref_table_init(u32 requested_size, u32 start)
73{ 101{
74 struct reference *table; 102 struct reference *table;
75 u32 sz = 1 << 4; 103 u32 actual_size;
76 u32 index_mask;
77 int i;
78 104
79 while (sz < requested_size) { 105 /* account for unused entry, then round up size to a power of 2 */
80 sz <<= 1; 106
81 } 107 requested_size++;
82 table = vmalloc(sz * sizeof(*table)); 108 for (actual_size = 16; actual_size < requested_size; actual_size <<= 1)
109 /* do nothing */ ;
110
111 /* allocate table & mark all entries as uninitialized */
112
113 table = __vmalloc(actual_size * sizeof(struct reference),
114 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
83 if (table == NULL) 115 if (table == NULL)
84 return -ENOMEM; 116 return -ENOMEM;
85 117
86 write_lock_bh(&ref_table_lock);
87 index_mask = sz - 1;
88 for (i = sz - 1; i >= 0; i--) {
89 table[i].object = NULL;
90 spin_lock_init(&table[i].lock);
91 table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
92 }
93 tipc_ref_table.entries = table; 118 tipc_ref_table.entries = table;
94 tipc_ref_table.index_mask = index_mask; 119 tipc_ref_table.capacity = requested_size;
95 tipc_ref_table.first_free = sz - 1; 120 tipc_ref_table.init_point = 1;
96 tipc_ref_table.last_free = 1; 121 tipc_ref_table.first_free = 0;
97 write_unlock_bh(&ref_table_lock); 122 tipc_ref_table.last_free = 0;
123 tipc_ref_table.index_mask = actual_size - 1;
124 tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask;
125
98 return TIPC_OK; 126 return TIPC_OK;
99} 127}
100 128
@@ -125,7 +153,7 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
125 u32 index; 153 u32 index;
126 u32 index_mask; 154 u32 index_mask;
127 u32 next_plus_upper; 155 u32 next_plus_upper;
128 u32 reference = 0; 156 u32 ref;
129 157
130 if (!object) { 158 if (!object) {
131 err("Attempt to acquire reference to non-existent object\n"); 159 err("Attempt to acquire reference to non-existent object\n");
@@ -136,6 +164,8 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
136 return 0; 164 return 0;
137 } 165 }
138 166
167 /* take a free entry, if available; otherwise initialize a new entry */
168
139 write_lock_bh(&ref_table_lock); 169 write_lock_bh(&ref_table_lock);
140 if (tipc_ref_table.first_free) { 170 if (tipc_ref_table.first_free) {
141 index = tipc_ref_table.first_free; 171 index = tipc_ref_table.first_free;
@@ -143,17 +173,29 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
143 index_mask = tipc_ref_table.index_mask; 173 index_mask = tipc_ref_table.index_mask;
144 /* take lock in case a previous user of entry still holds it */ 174 /* take lock in case a previous user of entry still holds it */
145 spin_lock_bh(&entry->lock); 175 spin_lock_bh(&entry->lock);
146 next_plus_upper = entry->data.next_plus_upper; 176 next_plus_upper = entry->ref;
147 tipc_ref_table.first_free = next_plus_upper & index_mask; 177 tipc_ref_table.first_free = next_plus_upper & index_mask;
148 reference = (next_plus_upper & ~index_mask) + index; 178 ref = (next_plus_upper & ~index_mask) + index;
149 entry->data.reference = reference; 179 entry->ref = ref;
150 entry->object = object; 180 entry->object = object;
151 if (lock != NULL)
152 *lock = &entry->lock;
153 spin_unlock_bh(&entry->lock); 181 spin_unlock_bh(&entry->lock);
182 *lock = &entry->lock;
183 }
184 else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
185 index = tipc_ref_table.init_point++;
186 entry = &(tipc_ref_table.entries[index]);
187 spin_lock_init(&entry->lock);
188 ref = tipc_ref_table.start_mask + index;
189 entry->ref = ref;
190 entry->object = object;
191 *lock = &entry->lock;
192 }
193 else {
194 ref = 0;
154 } 195 }
155 write_unlock_bh(&ref_table_lock); 196 write_unlock_bh(&ref_table_lock);
156 return reference; 197
198 return ref;
157} 199}
158 200
159/** 201/**
@@ -169,42 +211,99 @@ void tipc_ref_discard(u32 ref)
169 u32 index; 211 u32 index;
170 u32 index_mask; 212 u32 index_mask;
171 213
172 if (!ref) {
173 err("Attempt to discard reference 0\n");
174 return;
175 }
176 if (!tipc_ref_table.entries) { 214 if (!tipc_ref_table.entries) {
177 err("Reference table not found during discard attempt\n"); 215 err("Reference table not found during discard attempt\n");
178 return; 216 return;
179 } 217 }
180 218
181 write_lock_bh(&ref_table_lock);
182 index_mask = tipc_ref_table.index_mask; 219 index_mask = tipc_ref_table.index_mask;
183 index = ref & index_mask; 220 index = ref & index_mask;
184 entry = &(tipc_ref_table.entries[index]); 221 entry = &(tipc_ref_table.entries[index]);
185 222
223 write_lock_bh(&ref_table_lock);
224
186 if (!entry->object) { 225 if (!entry->object) {
187 err("Attempt to discard reference to non-existent object\n"); 226 err("Attempt to discard reference to non-existent object\n");
188 goto exit; 227 goto exit;
189 } 228 }
190 if (entry->data.reference != ref) { 229 if (entry->ref != ref) {
191 err("Attempt to discard non-existent reference\n"); 230 err("Attempt to discard non-existent reference\n");
192 goto exit; 231 goto exit;
193 } 232 }
194 233
195 /* mark entry as unused */ 234 /*
235 * mark entry as unused; increment instance part of entry's reference
236 * to invalidate any subsequent references
237 */
238
196 entry->object = NULL; 239 entry->object = NULL;
240 entry->ref = (ref & ~index_mask) + (index_mask + 1);
241
242 /* append entry to free entry list */
243
197 if (tipc_ref_table.first_free == 0) 244 if (tipc_ref_table.first_free == 0)
198 tipc_ref_table.first_free = index; 245 tipc_ref_table.first_free = index;
199 else 246 else
200 /* next_plus_upper is always XXXX|0--0 for last free entry */ 247 tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index;
201 tipc_ref_table.entries[tipc_ref_table.last_free].data.next_plus_upper
202 |= index;
203 tipc_ref_table.last_free = index; 248 tipc_ref_table.last_free = index;
204 249
205 /* increment upper bits of entry to invalidate subsequent references */
206 entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1);
207exit: 250exit:
208 write_unlock_bh(&ref_table_lock); 251 write_unlock_bh(&ref_table_lock);
209} 252}
210 253
254/**
255 * tipc_ref_lock - lock referenced object and return pointer to it
256 */
257
258void *tipc_ref_lock(u32 ref)
259{
260 if (likely(tipc_ref_table.entries)) {
261 struct reference *entry;
262
263 entry = &tipc_ref_table.entries[ref &
264 tipc_ref_table.index_mask];
265 if (likely(entry->ref != 0)) {
266 spin_lock_bh(&entry->lock);
267 if (likely((entry->ref == ref) && (entry->object)))
268 return entry->object;
269 spin_unlock_bh(&entry->lock);
270 }
271 }
272 return NULL;
273}
274
275/**
276 * tipc_ref_unlock - unlock referenced object
277 */
278
279void tipc_ref_unlock(u32 ref)
280{
281 if (likely(tipc_ref_table.entries)) {
282 struct reference *entry;
283
284 entry = &tipc_ref_table.entries[ref &
285 tipc_ref_table.index_mask];
286 if (likely((entry->ref == ref) && (entry->object)))
287 spin_unlock_bh(&entry->lock);
288 else
289 err("Attempt to unlock non-existent reference\n");
290 }
291}
292
293/**
294 * tipc_ref_deref - return pointer referenced object (without locking it)
295 */
296
297void *tipc_ref_deref(u32 ref)
298{
299 if (likely(tipc_ref_table.entries)) {
300 struct reference *entry;
301
302 entry = &tipc_ref_table.entries[ref &
303 tipc_ref_table.index_mask];
304 if (likely(entry->ref == ref))
305 return entry->object;
306 }
307 return NULL;
308}
309
diff --git a/net/tipc/ref.h b/net/tipc/ref.h
index 38f3a7f4a78d..7e3798ea93b9 100644
--- a/net/tipc/ref.h
+++ b/net/tipc/ref.h
@@ -2,7 +2,7 @@
2 * net/tipc/ref.h: Include file for TIPC object registry code 2 * net/tipc/ref.h: Include file for TIPC object registry code
3 * 3 *
4 * Copyright (c) 1991-2006, Ericsson AB 4 * Copyright (c) 1991-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -37,95 +37,14 @@
37#ifndef _TIPC_REF_H 37#ifndef _TIPC_REF_H
38#define _TIPC_REF_H 38#define _TIPC_REF_H
39 39
40/**
41 * struct reference - TIPC object reference entry
42 * @object: pointer to object associated with reference entry
43 * @lock: spinlock controlling access to object
44 * @data: reference value associated with object (or link to next unused entry)
45 */
46
47struct reference {
48 void *object;
49 spinlock_t lock;
50 union {
51 u32 next_plus_upper;
52 u32 reference;
53 } data;
54};
55
56/**
57 * struct tipc_ref_table - table of TIPC object reference entries
58 * @entries: pointer to array of reference entries
59 * @index_mask: bitmask for array index portion of reference values
60 * @first_free: array index of first unused object reference entry
61 * @last_free: array index of last unused object reference entry
62 */
63
64struct ref_table {
65 struct reference *entries;
66 u32 index_mask;
67 u32 first_free;
68 u32 last_free;
69};
70
71extern struct ref_table tipc_ref_table;
72
73int tipc_ref_table_init(u32 requested_size, u32 start); 40int tipc_ref_table_init(u32 requested_size, u32 start);
74void tipc_ref_table_stop(void); 41void tipc_ref_table_stop(void);
75 42
76u32 tipc_ref_acquire(void *object, spinlock_t **lock); 43u32 tipc_ref_acquire(void *object, spinlock_t **lock);
77void tipc_ref_discard(u32 ref); 44void tipc_ref_discard(u32 ref);
78 45
79 46void *tipc_ref_lock(u32 ref);
80/** 47void tipc_ref_unlock(u32 ref);
81 * tipc_ref_lock - lock referenced object and return pointer to it 48void *tipc_ref_deref(u32 ref);
82 */
83
84static inline void *tipc_ref_lock(u32 ref)
85{
86 if (likely(tipc_ref_table.entries)) {
87 struct reference *r =
88 &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
89
90 spin_lock_bh(&r->lock);
91 if (likely(r->data.reference == ref))
92 return r->object;
93 spin_unlock_bh(&r->lock);
94 }
95 return NULL;
96}
97
98/**
99 * tipc_ref_unlock - unlock referenced object
100 */
101
102static inline void tipc_ref_unlock(u32 ref)
103{
104 if (likely(tipc_ref_table.entries)) {
105 struct reference *r =
106 &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
107
108 if (likely(r->data.reference == ref))
109 spin_unlock_bh(&r->lock);
110 else
111 err("tipc_ref_unlock() invoked using obsolete reference\n");
112 }
113}
114
115/**
116 * tipc_ref_deref - return pointer referenced object (without locking it)
117 */
118
119static inline void *tipc_ref_deref(u32 ref)
120{
121 if (likely(tipc_ref_table.entries)) {
122 struct reference *r =
123 &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
124
125 if (likely(r->data.reference == ref))
126 return r->object;
127 }
128 return NULL;
129}
130 49
131#endif 50#endif
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 22909036b9bc..05853159536a 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -43,7 +43,6 @@
43#include <linux/slab.h> 43#include <linux/slab.h>
44#include <linux/poll.h> 44#include <linux/poll.h>
45#include <linux/fcntl.h> 45#include <linux/fcntl.h>
46#include <asm/semaphore.h>
47#include <asm/string.h> 46#include <asm/string.h>
48#include <asm/atomic.h> 47#include <asm/atomic.h>
49#include <net/sock.h> 48#include <net/sock.h>
@@ -58,16 +57,18 @@
58#define SS_LISTENING -1 /* socket is listening */ 57#define SS_LISTENING -1 /* socket is listening */
59#define SS_READY -2 /* socket is connectionless */ 58#define SS_READY -2 /* socket is connectionless */
60 59
61#define OVERLOAD_LIMIT_BASE 5000 60#define OVERLOAD_LIMIT_BASE 5000
61#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
62 62
63struct tipc_sock { 63struct tipc_sock {
64 struct sock sk; 64 struct sock sk;
65 struct tipc_port *p; 65 struct tipc_port *p;
66 struct semaphore sem;
67}; 66};
68 67
69#define tipc_sk(sk) ((struct tipc_sock*)sk) 68#define tipc_sk(sk) ((struct tipc_sock *)(sk))
69#define tipc_sk_port(sk) ((struct tipc_port *)(tipc_sk(sk)->p))
70 70
71static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
71static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf); 72static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
72static void wakeupdispatch(struct tipc_port *tport); 73static void wakeupdispatch(struct tipc_port *tport);
73 74
@@ -81,93 +82,115 @@ static int sockets_enabled = 0;
81 82
82static atomic_t tipc_queue_size = ATOMIC_INIT(0); 83static atomic_t tipc_queue_size = ATOMIC_INIT(0);
83 84
84
85/* 85/*
86 * sock_lock(): Lock a port/socket pair. lock_sock() can 86 * Revised TIPC socket locking policy:
87 * not be used here, since the same lock must protect ports 87 *
88 * with non-socket interfaces. 88 * Most socket operations take the standard socket lock when they start
89 * See net.c for description of locking policy. 89 * and hold it until they finish (or until they need to sleep). Acquiring
90 * this lock grants the owner exclusive access to the fields of the socket
91 * data structures, with the exception of the backlog queue. A few socket
92 * operations can be done without taking the socket lock because they only
93 * read socket information that never changes during the life of the socket.
94 *
95 * Socket operations may acquire the lock for the associated TIPC port if they
96 * need to perform an operation on the port. If any routine needs to acquire
97 * both the socket lock and the port lock it must take the socket lock first
98 * to avoid the risk of deadlock.
99 *
100 * The dispatcher handling incoming messages cannot grab the socket lock in
101 * the standard fashion, since invoked it runs at the BH level and cannot block.
102 * Instead, it checks to see if the socket lock is currently owned by someone,
103 * and either handles the message itself or adds it to the socket's backlog
104 * queue; in the latter case the queued message is processed once the process
105 * owning the socket lock releases it.
106 *
107 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
108 * the problem of a blocked socket operation preventing any other operations
109 * from occurring. However, applications must be careful if they have
110 * multiple threads trying to send (or receive) on the same socket, as these
111 * operations might interfere with each other. For example, doing a connect
112 * and a receive at the same time might allow the receive to consume the
113 * ACK message meant for the connect. While additional work could be done
114 * to try and overcome this, it doesn't seem to be worthwhile at the present.
115 *
116 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
117 * that another operation that must be performed in a non-blocking manner is
118 * not delayed for very long because the lock has already been taken.
119 *
120 * NOTE: This code assumes that certain fields of a port/socket pair are
121 * constant over its lifetime; such fields can be examined without taking
122 * the socket lock and/or port lock, and do not need to be re-read even
123 * after resuming processing after waiting. These fields include:
124 * - socket type
125 * - pointer to socket sk structure (aka tipc_sock structure)
126 * - pointer to port structure
127 * - port reference
90 */ 128 */
91static void sock_lock(struct tipc_sock* tsock)
92{
93 spin_lock_bh(tsock->p->lock);
94}
95 129
96/* 130/**
97 * sock_unlock(): Unlock a port/socket pair 131 * advance_rx_queue - discard first buffer in socket receive queue
132 *
133 * Caller must hold socket lock
98 */ 134 */
99static void sock_unlock(struct tipc_sock* tsock) 135
136static void advance_rx_queue(struct sock *sk)
100{ 137{
101 spin_unlock_bh(tsock->p->lock); 138 buf_discard(__skb_dequeue(&sk->sk_receive_queue));
139 atomic_dec(&tipc_queue_size);
102} 140}
103 141
104/** 142/**
105 * pollmask - determine the current set of poll() events for a socket 143 * discard_rx_queue - discard all buffers in socket receive queue
106 * @sock: socket structure
107 *
108 * TIPC sets the returned events as follows:
109 * a) POLLRDNORM and POLLIN are set if the socket's receive queue is non-empty
110 * or if a connection-oriented socket is does not have an active connection
111 * (i.e. a read operation will not block).
112 * b) POLLOUT is set except when a socket's connection has been terminated
113 * (i.e. a write operation will not block).
114 * c) POLLHUP is set when a socket's connection has been terminated.
115 *
116 * IMPORTANT: The fact that a read or write operation will not block does NOT
117 * imply that the operation will succeed!
118 * 144 *
119 * Returns pollmask value 145 * Caller must hold socket lock
120 */ 146 */
121 147
122static u32 pollmask(struct socket *sock) 148static void discard_rx_queue(struct sock *sk)
123{ 149{
124 u32 mask; 150 struct sk_buff *buf;
125
126 if ((skb_queue_len(&sock->sk->sk_receive_queue) != 0) ||
127 (sock->state == SS_UNCONNECTED) ||
128 (sock->state == SS_DISCONNECTING))
129 mask = (POLLRDNORM | POLLIN);
130 else
131 mask = 0;
132
133 if (sock->state == SS_DISCONNECTING)
134 mask |= POLLHUP;
135 else
136 mask |= POLLOUT;
137 151
138 return mask; 152 while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
153 atomic_dec(&tipc_queue_size);
154 buf_discard(buf);
155 }
139} 156}
140 157
141
142/** 158/**
143 * advance_queue - discard first buffer in queue 159 * reject_rx_queue - reject all buffers in socket receive queue
144 * @tsock: TIPC socket 160 *
161 * Caller must hold socket lock
145 */ 162 */
146 163
147static void advance_queue(struct tipc_sock *tsock) 164static void reject_rx_queue(struct sock *sk)
148{ 165{
149 sock_lock(tsock); 166 struct sk_buff *buf;
150 buf_discard(skb_dequeue(&tsock->sk.sk_receive_queue)); 167
151 sock_unlock(tsock); 168 while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
152 atomic_dec(&tipc_queue_size); 169 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
170 atomic_dec(&tipc_queue_size);
171 }
153} 172}
154 173
155/** 174/**
156 * tipc_create - create a TIPC socket 175 * tipc_create - create a TIPC socket
176 * @net: network namespace (must be default network)
157 * @sock: pre-allocated socket structure 177 * @sock: pre-allocated socket structure
158 * @protocol: protocol indicator (must be 0) 178 * @protocol: protocol indicator (must be 0)
159 * 179 *
160 * This routine creates and attaches a 'struct sock' to the 'struct socket', 180 * This routine creates additional data structures used by the TIPC socket,
161 * then create and attaches a TIPC port to the 'struct sock' part. 181 * initializes them, and links them together.
162 * 182 *
163 * Returns 0 on success, errno otherwise 183 * Returns 0 on success, errno otherwise
164 */ 184 */
185
165static int tipc_create(struct net *net, struct socket *sock, int protocol) 186static int tipc_create(struct net *net, struct socket *sock, int protocol)
166{ 187{
167 struct tipc_sock *tsock; 188 const struct proto_ops *ops;
168 struct tipc_port *port; 189 socket_state state;
169 struct sock *sk; 190 struct sock *sk;
170 u32 ref; 191 u32 portref;
192
193 /* Validate arguments */
171 194
172 if (net != &init_net) 195 if (net != &init_net)
173 return -EAFNOSUPPORT; 196 return -EAFNOSUPPORT;
@@ -175,54 +198,56 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol)
175 if (unlikely(protocol != 0)) 198 if (unlikely(protocol != 0))
176 return -EPROTONOSUPPORT; 199 return -EPROTONOSUPPORT;
177 200
178 ref = tipc_createport_raw(NULL, &dispatch, &wakeupdispatch, TIPC_LOW_IMPORTANCE);
179 if (unlikely(!ref))
180 return -ENOMEM;
181
182 sock->state = SS_UNCONNECTED;
183
184 switch (sock->type) { 201 switch (sock->type) {
185 case SOCK_STREAM: 202 case SOCK_STREAM:
186 sock->ops = &stream_ops; 203 ops = &stream_ops;
204 state = SS_UNCONNECTED;
187 break; 205 break;
188 case SOCK_SEQPACKET: 206 case SOCK_SEQPACKET:
189 sock->ops = &packet_ops; 207 ops = &packet_ops;
208 state = SS_UNCONNECTED;
190 break; 209 break;
191 case SOCK_DGRAM: 210 case SOCK_DGRAM:
192 tipc_set_portunreliable(ref, 1);
193 /* fall through */
194 case SOCK_RDM: 211 case SOCK_RDM:
195 tipc_set_portunreturnable(ref, 1); 212 ops = &msg_ops;
196 sock->ops = &msg_ops; 213 state = SS_READY;
197 sock->state = SS_READY;
198 break; 214 break;
199 default: 215 default:
200 tipc_deleteport(ref);
201 return -EPROTOTYPE; 216 return -EPROTOTYPE;
202 } 217 }
203 218
219 /* Allocate socket's protocol area */
220
204 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto); 221 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
205 if (!sk) { 222 if (sk == NULL)
206 tipc_deleteport(ref);
207 return -ENOMEM; 223 return -ENOMEM;
208 }
209 224
210 sock_init_data(sock, sk); 225 /* Allocate TIPC port for socket to use */
211 init_waitqueue_head(sk->sk_sleep);
212 sk->sk_rcvtimeo = 8 * HZ; /* default connect timeout = 8s */
213 226
214 tsock = tipc_sk(sk); 227 portref = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
215 port = tipc_get_port(ref); 228 TIPC_LOW_IMPORTANCE);
229 if (unlikely(portref == 0)) {
230 sk_free(sk);
231 return -ENOMEM;
232 }
216 233
217 tsock->p = port; 234 /* Finish initializing socket data structures */
218 port->usr_handle = tsock;
219 235
220 init_MUTEX(&tsock->sem); 236 sock->ops = ops;
237 sock->state = state;
221 238
222 dbg("sock_create: %x\n",tsock); 239 sock_init_data(sock, sk);
240 sk->sk_rcvtimeo = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
241 sk->sk_backlog_rcv = backlog_rcv;
242 tipc_sk(sk)->p = tipc_get_port(portref);
223 243
224 atomic_inc(&tipc_user_count); 244 if (sock->state == SS_READY) {
245 tipc_set_portunreturnable(portref, 1);
246 if (sock->type == SOCK_DGRAM)
247 tipc_set_portunreliable(portref, 1);
248 }
225 249
250 atomic_inc(&tipc_user_count);
226 return 0; 251 return 0;
227} 252}
228 253
@@ -245,52 +270,62 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol)
245 270
246static int release(struct socket *sock) 271static int release(struct socket *sock)
247{ 272{
248 struct tipc_sock *tsock = tipc_sk(sock->sk);
249 struct sock *sk = sock->sk; 273 struct sock *sk = sock->sk;
250 int res = TIPC_OK; 274 struct tipc_port *tport;
251 struct sk_buff *buf; 275 struct sk_buff *buf;
276 int res;
252 277
253 dbg("sock_delete: %x\n",tsock); 278 /*
254 if (!tsock) 279 * Exit if socket isn't fully initialized (occurs when a failed accept()
255 return 0; 280 * releases a pre-allocated child socket that was never used)
256 down(&tsock->sem); 281 */
257 if (!sock->sk) { 282
258 up(&tsock->sem); 283 if (sk == NULL)
259 return 0; 284 return 0;
260 }
261 285
262 /* Reject unreceived messages, unless no longer connected */ 286 tport = tipc_sk_port(sk);
287 lock_sock(sk);
288
289 /*
290 * Reject all unreceived messages, except on an active connection
291 * (which disconnects locally & sends a 'FIN+' to peer)
292 */
263 293
264 while (sock->state != SS_DISCONNECTING) { 294 while (sock->state != SS_DISCONNECTING) {
265 sock_lock(tsock); 295 buf = __skb_dequeue(&sk->sk_receive_queue);
266 buf = skb_dequeue(&sk->sk_receive_queue); 296 if (buf == NULL)
267 if (!buf)
268 tsock->p->usr_handle = NULL;
269 sock_unlock(tsock);
270 if (!buf)
271 break; 297 break;
298 atomic_dec(&tipc_queue_size);
272 if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf))) 299 if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf)))
273 buf_discard(buf); 300 buf_discard(buf);
274 else 301 else {
302 if ((sock->state == SS_CONNECTING) ||
303 (sock->state == SS_CONNECTED)) {
304 sock->state = SS_DISCONNECTING;
305 tipc_disconnect(tport->ref);
306 }
275 tipc_reject_msg(buf, TIPC_ERR_NO_PORT); 307 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
276 atomic_dec(&tipc_queue_size); 308 }
277 } 309 }
278 310
279 /* Delete TIPC port */ 311 /*
312 * Delete TIPC port; this ensures no more messages are queued
313 * (also disconnects an active connection & sends a 'FIN-' to peer)
314 */
280 315
281 res = tipc_deleteport(tsock->p->ref); 316 res = tipc_deleteport(tport->ref);
282 sock->sk = NULL;
283 317
284 /* Discard any remaining messages */ 318 /* Discard any remaining (connection-based) messages in receive queue */
285 319
286 while ((buf = skb_dequeue(&sk->sk_receive_queue))) { 320 discard_rx_queue(sk);
287 buf_discard(buf); 321
288 atomic_dec(&tipc_queue_size); 322 /* Reject any messages that accumulated in backlog queue */
289 }
290 323
291 up(&tsock->sem); 324 sock->state = SS_DISCONNECTING;
325 release_sock(sk);
292 326
293 sock_put(sk); 327 sock_put(sk);
328 sock->sk = NULL;
294 329
295 atomic_dec(&tipc_user_count); 330 atomic_dec(&tipc_user_count);
296 return res; 331 return res;
@@ -307,47 +342,32 @@ static int release(struct socket *sock)
307 * (i.e. a socket address length of 0) unbinds all names from the socket. 342 * (i.e. a socket address length of 0) unbinds all names from the socket.
308 * 343 *
309 * Returns 0 on success, errno otherwise 344 * Returns 0 on success, errno otherwise
345 *
346 * NOTE: This routine doesn't need to take the socket lock since it doesn't
347 * access any non-constant socket information.
310 */ 348 */
311 349
312static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len) 350static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
313{ 351{
314 struct tipc_sock *tsock = tipc_sk(sock->sk);
315 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 352 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
316 int res; 353 u32 portref = tipc_sk_port(sock->sk)->ref;
317 354
318 if (down_interruptible(&tsock->sem)) 355 if (unlikely(!uaddr_len))
319 return -ERESTARTSYS; 356 return tipc_withdraw(portref, 0, NULL);
320 357
321 if (unlikely(!uaddr_len)) { 358 if (uaddr_len < sizeof(struct sockaddr_tipc))
322 res = tipc_withdraw(tsock->p->ref, 0, NULL); 359 return -EINVAL;
323 goto exit; 360 if (addr->family != AF_TIPC)
324 } 361 return -EAFNOSUPPORT;
325
326 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
327 res = -EINVAL;
328 goto exit;
329 }
330 362
331 if (addr->family != AF_TIPC) {
332 res = -EAFNOSUPPORT;
333 goto exit;
334 }
335 if (addr->addrtype == TIPC_ADDR_NAME) 363 if (addr->addrtype == TIPC_ADDR_NAME)
336 addr->addr.nameseq.upper = addr->addr.nameseq.lower; 364 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
337 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) { 365 else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
338 res = -EAFNOSUPPORT; 366 return -EAFNOSUPPORT;
339 goto exit;
340 }
341 367
342 if (addr->scope > 0) 368 return (addr->scope > 0) ?
343 res = tipc_publish(tsock->p->ref, addr->scope, 369 tipc_publish(portref, addr->scope, &addr->addr.nameseq) :
344 &addr->addr.nameseq); 370 tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq);
345 else
346 res = tipc_withdraw(tsock->p->ref, -addr->scope,
347 &addr->addr.nameseq);
348exit:
349 up(&tsock->sem);
350 return res;
351} 371}
352 372
353/** 373/**
@@ -358,30 +378,33 @@ exit:
358 * @peer: 0 to obtain socket name, 1 to obtain peer socket name 378 * @peer: 0 to obtain socket name, 1 to obtain peer socket name
359 * 379 *
360 * Returns 0 on success, errno otherwise 380 * Returns 0 on success, errno otherwise
381 *
382 * NOTE: This routine doesn't need to take the socket lock since it doesn't
383 * access any non-constant socket information.
361 */ 384 */
362 385
363static int get_name(struct socket *sock, struct sockaddr *uaddr, 386static int get_name(struct socket *sock, struct sockaddr *uaddr,
364 int *uaddr_len, int peer) 387 int *uaddr_len, int peer)
365{ 388{
366 struct tipc_sock *tsock = tipc_sk(sock->sk);
367 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 389 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
390 u32 portref = tipc_sk_port(sock->sk)->ref;
368 u32 res; 391 u32 res;
369 392
370 if (down_interruptible(&tsock->sem)) 393 if (peer) {
371 return -ERESTARTSYS; 394 res = tipc_peer(portref, &addr->addr.id);
395 if (res)
396 return res;
397 } else {
398 tipc_ownidentity(portref, &addr->addr.id);
399 }
372 400
373 *uaddr_len = sizeof(*addr); 401 *uaddr_len = sizeof(*addr);
374 addr->addrtype = TIPC_ADDR_ID; 402 addr->addrtype = TIPC_ADDR_ID;
375 addr->family = AF_TIPC; 403 addr->family = AF_TIPC;
376 addr->scope = 0; 404 addr->scope = 0;
377 if (peer)
378 res = tipc_peer(tsock->p->ref, &addr->addr.id);
379 else
380 res = tipc_ownidentity(tsock->p->ref, &addr->addr.id);
381 addr->addr.name.domain = 0; 405 addr->addr.name.domain = 0;
382 406
383 up(&tsock->sem); 407 return 0;
384 return res;
385} 408}
386 409
387/** 410/**
@@ -390,15 +413,47 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
390 * @sock: socket for which to calculate the poll bits 413 * @sock: socket for which to calculate the poll bits
391 * @wait: ??? 414 * @wait: ???
392 * 415 *
393 * Returns the pollmask 416 * Returns pollmask value
417 *
418 * COMMENTARY:
419 * It appears that the usual socket locking mechanisms are not useful here
420 * since the pollmask info is potentially out-of-date the moment this routine
421 * exits. TCP and other protocols seem to rely on higher level poll routines
422 * to handle any preventable race conditions, so TIPC will do the same ...
423 *
424 * TIPC sets the returned events as follows:
425 * a) POLLRDNORM and POLLIN are set if the socket's receive queue is non-empty
426 * or if a connection-oriented socket is does not have an active connection
427 * (i.e. a read operation will not block).
428 * b) POLLOUT is set except when a socket's connection has been terminated
429 * (i.e. a write operation will not block).
430 * c) POLLHUP is set when a socket's connection has been terminated.
431 *
432 * IMPORTANT: The fact that a read or write operation will not block does NOT
433 * imply that the operation will succeed!
394 */ 434 */
395 435
396static unsigned int poll(struct file *file, struct socket *sock, 436static unsigned int poll(struct file *file, struct socket *sock,
397 poll_table *wait) 437 poll_table *wait)
398{ 438{
399 poll_wait(file, sock->sk->sk_sleep, wait); 439 struct sock *sk = sock->sk;
400 /* NEED LOCK HERE? */ 440 u32 mask;
401 return pollmask(sock); 441
442 poll_wait(file, sk->sk_sleep, wait);
443
444 if (!skb_queue_empty(&sk->sk_receive_queue) ||
445 (sock->state == SS_UNCONNECTED) ||
446 (sock->state == SS_DISCONNECTING))
447 mask = (POLLRDNORM | POLLIN);
448 else
449 mask = 0;
450
451 if (sock->state == SS_DISCONNECTING)
452 mask |= POLLHUP;
453 else
454 mask |= POLLOUT;
455
456 return mask;
402} 457}
403 458
404/** 459/**
@@ -420,7 +475,6 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
420 return 0; 475 return 0;
421 if (likely(dest->addr.name.name.type == TIPC_TOP_SRV)) 476 if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
422 return 0; 477 return 0;
423
424 if (likely(dest->addr.name.name.type != TIPC_CFG_SRV)) 478 if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
425 return -EACCES; 479 return -EACCES;
426 480
@@ -434,7 +488,7 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
434 488
435/** 489/**
436 * send_msg - send message in connectionless manner 490 * send_msg - send message in connectionless manner
437 * @iocb: (unused) 491 * @iocb: if NULL, indicates that socket lock is already held
438 * @sock: socket structure 492 * @sock: socket structure
439 * @m: message to send 493 * @m: message to send
440 * @total_len: length of message 494 * @total_len: length of message
@@ -450,9 +504,9 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
450static int send_msg(struct kiocb *iocb, struct socket *sock, 504static int send_msg(struct kiocb *iocb, struct socket *sock,
451 struct msghdr *m, size_t total_len) 505 struct msghdr *m, size_t total_len)
452{ 506{
453 struct tipc_sock *tsock = tipc_sk(sock->sk); 507 struct sock *sk = sock->sk;
508 struct tipc_port *tport = tipc_sk_port(sk);
454 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name; 509 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
455 struct sk_buff *buf;
456 int needs_conn; 510 int needs_conn;
457 int res = -EINVAL; 511 int res = -EINVAL;
458 512
@@ -462,48 +516,46 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
462 (dest->family != AF_TIPC))) 516 (dest->family != AF_TIPC)))
463 return -EINVAL; 517 return -EINVAL;
464 518
519 if (iocb)
520 lock_sock(sk);
521
465 needs_conn = (sock->state != SS_READY); 522 needs_conn = (sock->state != SS_READY);
466 if (unlikely(needs_conn)) { 523 if (unlikely(needs_conn)) {
467 if (sock->state == SS_LISTENING) 524 if (sock->state == SS_LISTENING) {
468 return -EPIPE; 525 res = -EPIPE;
469 if (sock->state != SS_UNCONNECTED) 526 goto exit;
470 return -EISCONN; 527 }
471 if ((tsock->p->published) || 528 if (sock->state != SS_UNCONNECTED) {
472 ((sock->type == SOCK_STREAM) && (total_len != 0))) 529 res = -EISCONN;
473 return -EOPNOTSUPP; 530 goto exit;
531 }
532 if ((tport->published) ||
533 ((sock->type == SOCK_STREAM) && (total_len != 0))) {
534 res = -EOPNOTSUPP;
535 goto exit;
536 }
474 if (dest->addrtype == TIPC_ADDR_NAME) { 537 if (dest->addrtype == TIPC_ADDR_NAME) {
475 tsock->p->conn_type = dest->addr.name.name.type; 538 tport->conn_type = dest->addr.name.name.type;
476 tsock->p->conn_instance = dest->addr.name.name.instance; 539 tport->conn_instance = dest->addr.name.name.instance;
477 } 540 }
478 }
479
480 if (down_interruptible(&tsock->sem))
481 return -ERESTARTSYS;
482
483 if (needs_conn) {
484 541
485 /* Abort any pending connection attempts (very unlikely) */ 542 /* Abort any pending connection attempts (very unlikely) */
486 543
487 while ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) { 544 reject_rx_queue(sk);
488 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
489 atomic_dec(&tipc_queue_size);
490 }
491
492 sock->state = SS_CONNECTING;
493 } 545 }
494 546
495 do { 547 do {
496 if (dest->addrtype == TIPC_ADDR_NAME) { 548 if (dest->addrtype == TIPC_ADDR_NAME) {
497 if ((res = dest_name_check(dest, m))) 549 if ((res = dest_name_check(dest, m)))
498 goto exit; 550 break;
499 res = tipc_send2name(tsock->p->ref, 551 res = tipc_send2name(tport->ref,
500 &dest->addr.name.name, 552 &dest->addr.name.name,
501 dest->addr.name.domain, 553 dest->addr.name.domain,
502 m->msg_iovlen, 554 m->msg_iovlen,
503 m->msg_iov); 555 m->msg_iov);
504 } 556 }
505 else if (dest->addrtype == TIPC_ADDR_ID) { 557 else if (dest->addrtype == TIPC_ADDR_ID) {
506 res = tipc_send2port(tsock->p->ref, 558 res = tipc_send2port(tport->ref,
507 &dest->addr.id, 559 &dest->addr.id,
508 m->msg_iovlen, 560 m->msg_iovlen,
509 m->msg_iov); 561 m->msg_iov);
@@ -511,36 +563,43 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
511 else if (dest->addrtype == TIPC_ADDR_MCAST) { 563 else if (dest->addrtype == TIPC_ADDR_MCAST) {
512 if (needs_conn) { 564 if (needs_conn) {
513 res = -EOPNOTSUPP; 565 res = -EOPNOTSUPP;
514 goto exit; 566 break;
515 } 567 }
516 if ((res = dest_name_check(dest, m))) 568 if ((res = dest_name_check(dest, m)))
517 goto exit; 569 break;
518 res = tipc_multicast(tsock->p->ref, 570 res = tipc_multicast(tport->ref,
519 &dest->addr.nameseq, 571 &dest->addr.nameseq,
520 0, 572 0,
521 m->msg_iovlen, 573 m->msg_iovlen,
522 m->msg_iov); 574 m->msg_iov);
523 } 575 }
524 if (likely(res != -ELINKCONG)) { 576 if (likely(res != -ELINKCONG)) {
525exit: 577 if (needs_conn && (res >= 0)) {
526 up(&tsock->sem); 578 sock->state = SS_CONNECTING;
527 return res; 579 }
580 break;
528 } 581 }
529 if (m->msg_flags & MSG_DONTWAIT) { 582 if (m->msg_flags & MSG_DONTWAIT) {
530 res = -EWOULDBLOCK; 583 res = -EWOULDBLOCK;
531 goto exit; 584 break;
532 }
533 if (wait_event_interruptible(*sock->sk->sk_sleep,
534 !tsock->p->congested)) {
535 res = -ERESTARTSYS;
536 goto exit;
537 } 585 }
586 release_sock(sk);
587 res = wait_event_interruptible(*sk->sk_sleep,
588 !tport->congested);
589 lock_sock(sk);
590 if (res)
591 break;
538 } while (1); 592 } while (1);
593
594exit:
595 if (iocb)
596 release_sock(sk);
597 return res;
539} 598}
540 599
541/** 600/**
542 * send_packet - send a connection-oriented message 601 * send_packet - send a connection-oriented message
543 * @iocb: (unused) 602 * @iocb: if NULL, indicates that socket lock is already held
544 * @sock: socket structure 603 * @sock: socket structure
545 * @m: message to send 604 * @m: message to send
546 * @total_len: length of message 605 * @total_len: length of message
@@ -553,7 +612,8 @@ exit:
553static int send_packet(struct kiocb *iocb, struct socket *sock, 612static int send_packet(struct kiocb *iocb, struct socket *sock,
554 struct msghdr *m, size_t total_len) 613 struct msghdr *m, size_t total_len)
555{ 614{
556 struct tipc_sock *tsock = tipc_sk(sock->sk); 615 struct sock *sk = sock->sk;
616 struct tipc_port *tport = tipc_sk_port(sk);
557 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name; 617 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
558 int res; 618 int res;
559 619
@@ -562,9 +622,8 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
562 if (unlikely(dest)) 622 if (unlikely(dest))
563 return send_msg(iocb, sock, m, total_len); 623 return send_msg(iocb, sock, m, total_len);
564 624
565 if (down_interruptible(&tsock->sem)) { 625 if (iocb)
566 return -ERESTARTSYS; 626 lock_sock(sk);
567 }
568 627
569 do { 628 do {
570 if (unlikely(sock->state != SS_CONNECTED)) { 629 if (unlikely(sock->state != SS_CONNECTED)) {
@@ -572,25 +631,28 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
572 res = -EPIPE; 631 res = -EPIPE;
573 else 632 else
574 res = -ENOTCONN; 633 res = -ENOTCONN;
575 goto exit; 634 break;
576 } 635 }
577 636
578 res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov); 637 res = tipc_send(tport->ref, m->msg_iovlen, m->msg_iov);
579 if (likely(res != -ELINKCONG)) { 638 if (likely(res != -ELINKCONG)) {
580exit: 639 break;
581 up(&tsock->sem);
582 return res;
583 } 640 }
584 if (m->msg_flags & MSG_DONTWAIT) { 641 if (m->msg_flags & MSG_DONTWAIT) {
585 res = -EWOULDBLOCK; 642 res = -EWOULDBLOCK;
586 goto exit; 643 break;
587 }
588 if (wait_event_interruptible(*sock->sk->sk_sleep,
589 !tsock->p->congested)) {
590 res = -ERESTARTSYS;
591 goto exit;
592 } 644 }
645 release_sock(sk);
646 res = wait_event_interruptible(*sk->sk_sleep,
647 (!tport->congested || !tport->connected));
648 lock_sock(sk);
649 if (res)
650 break;
593 } while (1); 651 } while (1);
652
653 if (iocb)
654 release_sock(sk);
655 return res;
594} 656}
595 657
596/** 658/**
@@ -606,11 +668,11 @@ exit:
606 * or errno if no data sent 668 * or errno if no data sent
607 */ 669 */
608 670
609
610static int send_stream(struct kiocb *iocb, struct socket *sock, 671static int send_stream(struct kiocb *iocb, struct socket *sock,
611 struct msghdr *m, size_t total_len) 672 struct msghdr *m, size_t total_len)
612{ 673{
613 struct tipc_port *tport; 674 struct sock *sk = sock->sk;
675 struct tipc_port *tport = tipc_sk_port(sk);
614 struct msghdr my_msg; 676 struct msghdr my_msg;
615 struct iovec my_iov; 677 struct iovec my_iov;
616 struct iovec *curr_iov; 678 struct iovec *curr_iov;
@@ -622,19 +684,27 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
622 int bytes_sent; 684 int bytes_sent;
623 int res; 685 int res;
624 686
687 lock_sock(sk);
688
625 /* Handle special cases where there is no connection */ 689 /* Handle special cases where there is no connection */
626 690
627 if (unlikely(sock->state != SS_CONNECTED)) { 691 if (unlikely(sock->state != SS_CONNECTED)) {
628 if (sock->state == SS_UNCONNECTED) 692 if (sock->state == SS_UNCONNECTED) {
629 return send_packet(iocb, sock, m, total_len); 693 res = send_packet(NULL, sock, m, total_len);
630 else if (sock->state == SS_DISCONNECTING) 694 goto exit;
631 return -EPIPE; 695 } else if (sock->state == SS_DISCONNECTING) {
632 else 696 res = -EPIPE;
633 return -ENOTCONN; 697 goto exit;
698 } else {
699 res = -ENOTCONN;
700 goto exit;
701 }
634 } 702 }
635 703
636 if (unlikely(m->msg_name)) 704 if (unlikely(m->msg_name)) {
637 return -EISCONN; 705 res = -EISCONN;
706 goto exit;
707 }
638 708
639 /* 709 /*
640 * Send each iovec entry using one or more messages 710 * Send each iovec entry using one or more messages
@@ -652,7 +722,6 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
652 my_msg.msg_name = NULL; 722 my_msg.msg_name = NULL;
653 bytes_sent = 0; 723 bytes_sent = 0;
654 724
655 tport = tipc_sk(sock->sk)->p;
656 hdr_size = msg_hdr_sz(&tport->phdr); 725 hdr_size = msg_hdr_sz(&tport->phdr);
657 726
658 while (curr_iovlen--) { 727 while (curr_iovlen--) {
@@ -667,10 +736,10 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
667 bytes_to_send = curr_left; 736 bytes_to_send = curr_left;
668 my_iov.iov_base = curr_start; 737 my_iov.iov_base = curr_start;
669 my_iov.iov_len = bytes_to_send; 738 my_iov.iov_len = bytes_to_send;
670 if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0) { 739 if ((res = send_packet(NULL, sock, &my_msg, 0)) < 0) {
671 if (bytes_sent != 0) 740 if (bytes_sent)
672 res = bytes_sent; 741 res = bytes_sent;
673 return res; 742 goto exit;
674 } 743 }
675 curr_left -= bytes_to_send; 744 curr_left -= bytes_to_send;
676 curr_start += bytes_to_send; 745 curr_start += bytes_to_send;
@@ -679,22 +748,23 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
679 748
680 curr_iov++; 749 curr_iov++;
681 } 750 }
682 751 res = bytes_sent;
683 return bytes_sent; 752exit:
753 release_sock(sk);
754 return res;
684} 755}
685 756
686/** 757/**
687 * auto_connect - complete connection setup to a remote port 758 * auto_connect - complete connection setup to a remote port
688 * @sock: socket structure 759 * @sock: socket structure
689 * @tsock: TIPC-specific socket structure
690 * @msg: peer's response message 760 * @msg: peer's response message
691 * 761 *
692 * Returns 0 on success, errno otherwise 762 * Returns 0 on success, errno otherwise
693 */ 763 */
694 764
695static int auto_connect(struct socket *sock, struct tipc_sock *tsock, 765static int auto_connect(struct socket *sock, struct tipc_msg *msg)
696 struct tipc_msg *msg)
697{ 766{
767 struct tipc_port *tport = tipc_sk_port(sock->sk);
698 struct tipc_portid peer; 768 struct tipc_portid peer;
699 769
700 if (msg_errcode(msg)) { 770 if (msg_errcode(msg)) {
@@ -704,8 +774,8 @@ static int auto_connect(struct socket *sock, struct tipc_sock *tsock,
704 774
705 peer.ref = msg_origport(msg); 775 peer.ref = msg_origport(msg);
706 peer.node = msg_orignode(msg); 776 peer.node = msg_orignode(msg);
707 tipc_connect2port(tsock->p->ref, &peer); 777 tipc_connect2port(tport->ref, &peer);
708 tipc_set_portimportance(tsock->p->ref, msg_importance(msg)); 778 tipc_set_portimportance(tport->ref, msg_importance(msg));
709 sock->state = SS_CONNECTED; 779 sock->state = SS_CONNECTED;
710 return 0; 780 return 0;
711} 781}
@@ -818,62 +888,54 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
818static int recv_msg(struct kiocb *iocb, struct socket *sock, 888static int recv_msg(struct kiocb *iocb, struct socket *sock,
819 struct msghdr *m, size_t buf_len, int flags) 889 struct msghdr *m, size_t buf_len, int flags)
820{ 890{
821 struct tipc_sock *tsock = tipc_sk(sock->sk); 891 struct sock *sk = sock->sk;
892 struct tipc_port *tport = tipc_sk_port(sk);
822 struct sk_buff *buf; 893 struct sk_buff *buf;
823 struct tipc_msg *msg; 894 struct tipc_msg *msg;
824 unsigned int q_len;
825 unsigned int sz; 895 unsigned int sz;
826 u32 err; 896 u32 err;
827 int res; 897 int res;
828 898
829 /* Currently doesn't support receiving into multiple iovec entries */ 899 /* Catch invalid receive requests */
830 900
831 if (m->msg_iovlen != 1) 901 if (m->msg_iovlen != 1)
832 return -EOPNOTSUPP; 902 return -EOPNOTSUPP; /* Don't do multiple iovec entries yet */
833
834 /* Catch invalid receive attempts */
835 903
836 if (unlikely(!buf_len)) 904 if (unlikely(!buf_len))
837 return -EINVAL; 905 return -EINVAL;
838 906
839 if (sock->type == SOCK_SEQPACKET) { 907 lock_sock(sk);
840 if (unlikely(sock->state == SS_UNCONNECTED))
841 return -ENOTCONN;
842 if (unlikely((sock->state == SS_DISCONNECTING) &&
843 (skb_queue_len(&sock->sk->sk_receive_queue) == 0)))
844 return -ENOTCONN;
845 }
846 908
847 /* Look for a message in receive queue; wait if necessary */ 909 if (unlikely(sock->state == SS_UNCONNECTED)) {
848 910 res = -ENOTCONN;
849 if (unlikely(down_interruptible(&tsock->sem)))
850 return -ERESTARTSYS;
851
852restart:
853 if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
854 (flags & MSG_DONTWAIT))) {
855 res = -EWOULDBLOCK;
856 goto exit; 911 goto exit;
857 } 912 }
858 913
859 if ((res = wait_event_interruptible( 914restart:
860 *sock->sk->sk_sleep,
861 ((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
862 (sock->state == SS_DISCONNECTING))) )) {
863 goto exit;
864 }
865 915
866 /* Catch attempt to receive on an already terminated connection */ 916 /* Look for a message in receive queue; wait if necessary */
867 /* [THIS CHECK MAY OVERLAP WITH AN EARLIER CHECK] */
868 917
869 if (!q_len) { 918 while (skb_queue_empty(&sk->sk_receive_queue)) {
870 res = -ENOTCONN; 919 if (sock->state == SS_DISCONNECTING) {
871 goto exit; 920 res = -ENOTCONN;
921 goto exit;
922 }
923 if (flags & MSG_DONTWAIT) {
924 res = -EWOULDBLOCK;
925 goto exit;
926 }
927 release_sock(sk);
928 res = wait_event_interruptible(*sk->sk_sleep,
929 (!skb_queue_empty(&sk->sk_receive_queue) ||
930 (sock->state == SS_DISCONNECTING)));
931 lock_sock(sk);
932 if (res)
933 goto exit;
872 } 934 }
873 935
874 /* Get access to first message in receive queue */ 936 /* Look at first message in receive queue */
875 937
876 buf = skb_peek(&sock->sk->sk_receive_queue); 938 buf = skb_peek(&sk->sk_receive_queue);
877 msg = buf_msg(buf); 939 msg = buf_msg(buf);
878 sz = msg_data_sz(msg); 940 sz = msg_data_sz(msg);
879 err = msg_errcode(msg); 941 err = msg_errcode(msg);
@@ -881,14 +943,15 @@ restart:
881 /* Complete connection setup for an implied connect */ 943 /* Complete connection setup for an implied connect */
882 944
883 if (unlikely(sock->state == SS_CONNECTING)) { 945 if (unlikely(sock->state == SS_CONNECTING)) {
884 if ((res = auto_connect(sock, tsock, msg))) 946 res = auto_connect(sock, msg);
947 if (res)
885 goto exit; 948 goto exit;
886 } 949 }
887 950
888 /* Discard an empty non-errored message & try again */ 951 /* Discard an empty non-errored message & try again */
889 952
890 if ((!sz) && (!err)) { 953 if ((!sz) && (!err)) {
891 advance_queue(tsock); 954 advance_rx_queue(sk);
892 goto restart; 955 goto restart;
893 } 956 }
894 957
@@ -898,7 +961,8 @@ restart:
898 961
899 /* Capture ancillary data (optional) */ 962 /* Capture ancillary data (optional) */
900 963
901 if ((res = anc_data_recv(m, msg, tsock->p))) 964 res = anc_data_recv(m, msg, tport);
965 if (res)
902 goto exit; 966 goto exit;
903 967
904 /* Capture message data (if valid) & compute return value (always) */ 968 /* Capture message data (if valid) & compute return value (always) */
@@ -925,12 +989,13 @@ restart:
925 /* Consume received message (optional) */ 989 /* Consume received message (optional) */
926 990
927 if (likely(!(flags & MSG_PEEK))) { 991 if (likely(!(flags & MSG_PEEK))) {
928 if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) 992 if ((sock->state != SS_READY) &&
929 tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked); 993 (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
930 advance_queue(tsock); 994 tipc_acknowledge(tport->ref, tport->conn_unacked);
995 advance_rx_queue(sk);
931 } 996 }
932exit: 997exit:
933 up(&tsock->sem); 998 release_sock(sk);
934 return res; 999 return res;
935} 1000}
936 1001
@@ -950,10 +1015,10 @@ exit:
950static int recv_stream(struct kiocb *iocb, struct socket *sock, 1015static int recv_stream(struct kiocb *iocb, struct socket *sock,
951 struct msghdr *m, size_t buf_len, int flags) 1016 struct msghdr *m, size_t buf_len, int flags)
952{ 1017{
953 struct tipc_sock *tsock = tipc_sk(sock->sk); 1018 struct sock *sk = sock->sk;
1019 struct tipc_port *tport = tipc_sk_port(sk);
954 struct sk_buff *buf; 1020 struct sk_buff *buf;
955 struct tipc_msg *msg; 1021 struct tipc_msg *msg;
956 unsigned int q_len;
957 unsigned int sz; 1022 unsigned int sz;
958 int sz_to_copy; 1023 int sz_to_copy;
959 int sz_copied = 0; 1024 int sz_copied = 0;
@@ -961,54 +1026,49 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
961 char __user *crs = m->msg_iov->iov_base; 1026 char __user *crs = m->msg_iov->iov_base;
962 unsigned char *buf_crs; 1027 unsigned char *buf_crs;
963 u32 err; 1028 u32 err;
964 int res; 1029 int res = 0;
965 1030
966 /* Currently doesn't support receiving into multiple iovec entries */ 1031 /* Catch invalid receive attempts */
967 1032
968 if (m->msg_iovlen != 1) 1033 if (m->msg_iovlen != 1)
969 return -EOPNOTSUPP; 1034 return -EOPNOTSUPP; /* Don't do multiple iovec entries yet */
970
971 /* Catch invalid receive attempts */
972 1035
973 if (unlikely(!buf_len)) 1036 if (unlikely(!buf_len))
974 return -EINVAL; 1037 return -EINVAL;
975 1038
976 if (unlikely(sock->state == SS_DISCONNECTING)) { 1039 lock_sock(sk);
977 if (skb_queue_len(&sock->sk->sk_receive_queue) == 0)
978 return -ENOTCONN;
979 } else if (unlikely(sock->state != SS_CONNECTED))
980 return -ENOTCONN;
981 1040
982 /* Look for a message in receive queue; wait if necessary */ 1041 if (unlikely((sock->state == SS_UNCONNECTED) ||
983 1042 (sock->state == SS_CONNECTING))) {
984 if (unlikely(down_interruptible(&tsock->sem))) 1043 res = -ENOTCONN;
985 return -ERESTARTSYS;
986
987restart:
988 if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
989 (flags & MSG_DONTWAIT))) {
990 res = -EWOULDBLOCK;
991 goto exit; 1044 goto exit;
992 } 1045 }
993 1046
994 if ((res = wait_event_interruptible( 1047restart:
995 *sock->sk->sk_sleep,
996 ((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
997 (sock->state == SS_DISCONNECTING))) )) {
998 goto exit;
999 }
1000 1048
1001 /* Catch attempt to receive on an already terminated connection */ 1049 /* Look for a message in receive queue; wait if necessary */
1002 /* [THIS CHECK MAY OVERLAP WITH AN EARLIER CHECK] */
1003 1050
1004 if (!q_len) { 1051 while (skb_queue_empty(&sk->sk_receive_queue)) {
1005 res = -ENOTCONN; 1052 if (sock->state == SS_DISCONNECTING) {
1006 goto exit; 1053 res = -ENOTCONN;
1054 goto exit;
1055 }
1056 if (flags & MSG_DONTWAIT) {
1057 res = -EWOULDBLOCK;
1058 goto exit;
1059 }
1060 release_sock(sk);
1061 res = wait_event_interruptible(*sk->sk_sleep,
1062 (!skb_queue_empty(&sk->sk_receive_queue) ||
1063 (sock->state == SS_DISCONNECTING)));
1064 lock_sock(sk);
1065 if (res)
1066 goto exit;
1007 } 1067 }
1008 1068
1009 /* Get access to first message in receive queue */ 1069 /* Look at first message in receive queue */
1010 1070
1011 buf = skb_peek(&sock->sk->sk_receive_queue); 1071 buf = skb_peek(&sk->sk_receive_queue);
1012 msg = buf_msg(buf); 1072 msg = buf_msg(buf);
1013 sz = msg_data_sz(msg); 1073 sz = msg_data_sz(msg);
1014 err = msg_errcode(msg); 1074 err = msg_errcode(msg);
@@ -1016,7 +1076,7 @@ restart:
1016 /* Discard an empty non-errored message & try again */ 1076 /* Discard an empty non-errored message & try again */
1017 1077
1018 if ((!sz) && (!err)) { 1078 if ((!sz) && (!err)) {
1019 advance_queue(tsock); 1079 advance_rx_queue(sk);
1020 goto restart; 1080 goto restart;
1021 } 1081 }
1022 1082
@@ -1024,7 +1084,8 @@ restart:
1024 1084
1025 if (sz_copied == 0) { 1085 if (sz_copied == 0) {
1026 set_orig_addr(m, msg); 1086 set_orig_addr(m, msg);
1027 if ((res = anc_data_recv(m, msg, tsock->p))) 1087 res = anc_data_recv(m, msg, tport);
1088 if (res)
1028 goto exit; 1089 goto exit;
1029 } 1090 }
1030 1091
@@ -1032,7 +1093,7 @@ restart:
1032 1093
1033 if (!err) { 1094 if (!err) {
1034 buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle); 1095 buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle);
1035 sz = skb_tail_pointer(buf) - buf_crs; 1096 sz = (unsigned char *)msg + msg_size(msg) - buf_crs;
1036 1097
1037 needed = (buf_len - sz_copied); 1098 needed = (buf_len - sz_copied);
1038 sz_to_copy = (sz <= needed) ? sz : needed; 1099 sz_to_copy = (sz <= needed) ? sz : needed;
@@ -1062,35 +1123,37 @@ restart:
1062 /* Consume received message (optional) */ 1123 /* Consume received message (optional) */
1063 1124
1064 if (likely(!(flags & MSG_PEEK))) { 1125 if (likely(!(flags & MSG_PEEK))) {
1065 if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) 1126 if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
1066 tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked); 1127 tipc_acknowledge(tport->ref, tport->conn_unacked);
1067 advance_queue(tsock); 1128 advance_rx_queue(sk);
1068 } 1129 }
1069 1130
1070 /* Loop around if more data is required */ 1131 /* Loop around if more data is required */
1071 1132
1072 if ((sz_copied < buf_len) /* didn't get all requested data */ 1133 if ((sz_copied < buf_len) /* didn't get all requested data */
1073 && (flags & MSG_WAITALL) /* ... and need to wait for more */ 1134 && (!skb_queue_empty(&sock->sk->sk_receive_queue) ||
1135 (flags & MSG_WAITALL))
1136 /* ... and more is ready or required */
1074 && (!(flags & MSG_PEEK)) /* ... and aren't just peeking at data */ 1137 && (!(flags & MSG_PEEK)) /* ... and aren't just peeking at data */
1075 && (!err) /* ... and haven't reached a FIN */ 1138 && (!err) /* ... and haven't reached a FIN */
1076 ) 1139 )
1077 goto restart; 1140 goto restart;
1078 1141
1079exit: 1142exit:
1080 up(&tsock->sem); 1143 release_sock(sk);
1081 return sz_copied ? sz_copied : res; 1144 return sz_copied ? sz_copied : res;
1082} 1145}
1083 1146
1084/** 1147/**
1085 * queue_overloaded - test if queue overload condition exists 1148 * rx_queue_full - determine if receive queue can accept another message
1149 * @msg: message to be added to queue
1086 * @queue_size: current size of queue 1150 * @queue_size: current size of queue
1087 * @base: nominal maximum size of queue 1151 * @base: nominal maximum size of queue
1088 * @msg: message to be added to queue
1089 * 1152 *
1090 * Returns 1 if queue is currently overloaded, 0 otherwise 1153 * Returns 1 if queue is unable to accept message, 0 otherwise
1091 */ 1154 */
1092 1155
1093static int queue_overloaded(u32 queue_size, u32 base, struct tipc_msg *msg) 1156static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
1094{ 1157{
1095 u32 threshold; 1158 u32 threshold;
1096 u32 imp = msg_importance(msg); 1159 u32 imp = msg_importance(msg);
@@ -1107,41 +1170,28 @@ static int queue_overloaded(u32 queue_size, u32 base, struct tipc_msg *msg)
1107 if (msg_connected(msg)) 1170 if (msg_connected(msg))
1108 threshold *= 4; 1171 threshold *= 4;
1109 1172
1110 return (queue_size > threshold); 1173 return (queue_size >= threshold);
1111}
1112
1113/**
1114 * async_disconnect - wrapper function used to disconnect port
1115 * @portref: TIPC port reference (passed as pointer-sized value)
1116 */
1117
1118static void async_disconnect(unsigned long portref)
1119{
1120 tipc_disconnect((u32)portref);
1121} 1174}
1122 1175
1123/** 1176/**
1124 * dispatch - handle arriving message 1177 * filter_rcv - validate incoming message
1125 * @tport: TIPC port that received message 1178 * @sk: socket
1126 * @buf: message 1179 * @buf: message
1127 * 1180 *
1128 * Called with port locked. Must not take socket lock to avoid deadlock risk. 1181 * Enqueues message on receive queue if acceptable; optionally handles
1182 * disconnect indication for a connected socket.
1183 *
1184 * Called with socket lock already taken; port lock may also be taken.
1129 * 1185 *
1130 * Returns TIPC error status code (TIPC_OK if message is not to be rejected) 1186 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1131 */ 1187 */
1132 1188
1133static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf) 1189static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1134{ 1190{
1191 struct socket *sock = sk->sk_socket;
1135 struct tipc_msg *msg = buf_msg(buf); 1192 struct tipc_msg *msg = buf_msg(buf);
1136 struct tipc_sock *tsock = (struct tipc_sock *)tport->usr_handle;
1137 struct socket *sock;
1138 u32 recv_q_len; 1193 u32 recv_q_len;
1139 1194
1140 /* Reject message if socket is closing */
1141
1142 if (!tsock)
1143 return TIPC_ERR_NO_PORT;
1144
1145 /* Reject message if it is wrong sort of message for socket */ 1195 /* Reject message if it is wrong sort of message for socket */
1146 1196
1147 /* 1197 /*
@@ -1149,7 +1199,7 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1149 * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY 1199 * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY
1150 * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC 1200 * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC
1151 */ 1201 */
1152 sock = tsock->sk.sk_socket; 1202
1153 if (sock->state == SS_READY) { 1203 if (sock->state == SS_READY) {
1154 if (msg_connected(msg)) { 1204 if (msg_connected(msg)) {
1155 msg_dbg(msg, "dispatch filter 1\n"); 1205 msg_dbg(msg, "dispatch filter 1\n");
@@ -1192,52 +1242,103 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1192 1242
1193 /* Reject message if there isn't room to queue it */ 1243 /* Reject message if there isn't room to queue it */
1194 1244
1195 if (unlikely((u32)atomic_read(&tipc_queue_size) > 1245 recv_q_len = (u32)atomic_read(&tipc_queue_size);
1196 OVERLOAD_LIMIT_BASE)) { 1246 if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) {
1197 if (queue_overloaded(atomic_read(&tipc_queue_size), 1247 if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE))
1198 OVERLOAD_LIMIT_BASE, msg))
1199 return TIPC_ERR_OVERLOAD; 1248 return TIPC_ERR_OVERLOAD;
1200 } 1249 }
1201 recv_q_len = skb_queue_len(&tsock->sk.sk_receive_queue); 1250 recv_q_len = skb_queue_len(&sk->sk_receive_queue);
1202 if (unlikely(recv_q_len > (OVERLOAD_LIMIT_BASE / 2))) { 1251 if (unlikely(recv_q_len >= (OVERLOAD_LIMIT_BASE / 2))) {
1203 if (queue_overloaded(recv_q_len, 1252 if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE / 2))
1204 OVERLOAD_LIMIT_BASE / 2, msg))
1205 return TIPC_ERR_OVERLOAD; 1253 return TIPC_ERR_OVERLOAD;
1206 } 1254 }
1207 1255
1256 /* Enqueue message (finally!) */
1257
1258 msg_dbg(msg, "<DISP<: ");
1259 TIPC_SKB_CB(buf)->handle = msg_data(msg);
1260 atomic_inc(&tipc_queue_size);
1261 __skb_queue_tail(&sk->sk_receive_queue, buf);
1262
1208 /* Initiate connection termination for an incoming 'FIN' */ 1263 /* Initiate connection termination for an incoming 'FIN' */
1209 1264
1210 if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) { 1265 if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
1211 sock->state = SS_DISCONNECTING; 1266 sock->state = SS_DISCONNECTING;
1212 /* Note: Use signal since port lock is already taken! */ 1267 tipc_disconnect_port(tipc_sk_port(sk));
1213 tipc_k_signal((Handler)async_disconnect, tport->ref);
1214 } 1268 }
1215 1269
1216 /* Enqueue message (finally!) */ 1270 if (waitqueue_active(sk->sk_sleep))
1271 wake_up_interruptible(sk->sk_sleep);
1272 return TIPC_OK;
1273}
1217 1274
1218 msg_dbg(msg,"<DISP<: "); 1275/**
1219 TIPC_SKB_CB(buf)->handle = msg_data(msg); 1276 * backlog_rcv - handle incoming message from backlog queue
1220 atomic_inc(&tipc_queue_size); 1277 * @sk: socket
1221 skb_queue_tail(&sock->sk->sk_receive_queue, buf); 1278 * @buf: message
1279 *
1280 * Caller must hold socket lock, but not port lock.
1281 *
1282 * Returns 0
1283 */
1222 1284
1223 if (waitqueue_active(sock->sk->sk_sleep)) 1285static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
1224 wake_up_interruptible(sock->sk->sk_sleep); 1286{
1225 return TIPC_OK; 1287 u32 res;
1288
1289 res = filter_rcv(sk, buf);
1290 if (res)
1291 tipc_reject_msg(buf, res);
1292 return 0;
1293}
1294
1295/**
1296 * dispatch - handle incoming message
1297 * @tport: TIPC port that received message
1298 * @buf: message
1299 *
1300 * Called with port lock already taken.
1301 *
1302 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1303 */
1304
1305static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1306{
1307 struct sock *sk = (struct sock *)tport->usr_handle;
1308 u32 res;
1309
1310 /*
1311 * Process message if socket is unlocked; otherwise add to backlog queue
1312 *
1313 * This code is based on sk_receive_skb(), but must be distinct from it
1314 * since a TIPC-specific filter/reject mechanism is utilized
1315 */
1316
1317 bh_lock_sock(sk);
1318 if (!sock_owned_by_user(sk)) {
1319 res = filter_rcv(sk, buf);
1320 } else {
1321 sk_add_backlog(sk, buf);
1322 res = TIPC_OK;
1323 }
1324 bh_unlock_sock(sk);
1325
1326 return res;
1226} 1327}
1227 1328
1228/** 1329/**
1229 * wakeupdispatch - wake up port after congestion 1330 * wakeupdispatch - wake up port after congestion
1230 * @tport: port to wakeup 1331 * @tport: port to wakeup
1231 * 1332 *
1232 * Called with port lock on. 1333 * Called with port lock already taken.
1233 */ 1334 */
1234 1335
1235static void wakeupdispatch(struct tipc_port *tport) 1336static void wakeupdispatch(struct tipc_port *tport)
1236{ 1337{
1237 struct tipc_sock *tsock = (struct tipc_sock *)tport->usr_handle; 1338 struct sock *sk = (struct sock *)tport->usr_handle;
1238 1339
1239 if (waitqueue_active(tsock->sk.sk_sleep)) 1340 if (waitqueue_active(sk->sk_sleep))
1240 wake_up_interruptible(tsock->sk.sk_sleep); 1341 wake_up_interruptible(sk->sk_sleep);
1241} 1342}
1242 1343
1243/** 1344/**
@@ -1245,7 +1346,7 @@ static void wakeupdispatch(struct tipc_port *tport)
1245 * @sock: socket structure 1346 * @sock: socket structure
1246 * @dest: socket address for destination port 1347 * @dest: socket address for destination port
1247 * @destlen: size of socket address data structure 1348 * @destlen: size of socket address data structure
1248 * @flags: (unused) 1349 * @flags: file-related flags associated with socket
1249 * 1350 *
1250 * Returns 0 on success, errno otherwise 1351 * Returns 0 on success, errno otherwise
1251 */ 1352 */
@@ -1253,72 +1354,105 @@ static void wakeupdispatch(struct tipc_port *tport)
1253static int connect(struct socket *sock, struct sockaddr *dest, int destlen, 1354static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1254 int flags) 1355 int flags)
1255{ 1356{
1256 struct tipc_sock *tsock = tipc_sk(sock->sk); 1357 struct sock *sk = sock->sk;
1257 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest; 1358 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
1258 struct msghdr m = {NULL,}; 1359 struct msghdr m = {NULL,};
1259 struct sk_buff *buf; 1360 struct sk_buff *buf;
1260 struct tipc_msg *msg; 1361 struct tipc_msg *msg;
1261 int res; 1362 int res;
1262 1363
1263 /* For now, TIPC does not allow use of connect() with DGRAM or RDM types */ 1364 lock_sock(sk);
1264 1365
1265 if (sock->state == SS_READY) 1366 /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
1266 return -EOPNOTSUPP; 1367
1267 1368 if (sock->state == SS_READY) {
1268 /* Issue Posix-compliant error code if socket is in the wrong state */ 1369 res = -EOPNOTSUPP;
1269 1370 goto exit;
1270 if (sock->state == SS_LISTENING) 1371 }
1271 return -EOPNOTSUPP; 1372
1272 if (sock->state == SS_CONNECTING) 1373 /* For now, TIPC does not support the non-blocking form of connect() */
1273 return -EALREADY; 1374
1274 if (sock->state != SS_UNCONNECTED) 1375 if (flags & O_NONBLOCK) {
1275 return -EISCONN; 1376 res = -EWOULDBLOCK;
1276 1377 goto exit;
1277 /* 1378 }
1278 * Reject connection attempt using multicast address 1379
1279 * 1380 /* Issue Posix-compliant error code if socket is in the wrong state */
1280 * Note: send_msg() validates the rest of the address fields, 1381
1281 * so there's no need to do it here 1382 if (sock->state == SS_LISTENING) {
1282 */ 1383 res = -EOPNOTSUPP;
1283 1384 goto exit;
1284 if (dst->addrtype == TIPC_ADDR_MCAST) 1385 }
1285 return -EINVAL; 1386 if (sock->state == SS_CONNECTING) {
1286 1387 res = -EALREADY;
1287 /* Send a 'SYN-' to destination */ 1388 goto exit;
1288 1389 }
1289 m.msg_name = dest; 1390 if (sock->state != SS_UNCONNECTED) {
1290 m.msg_namelen = destlen; 1391 res = -EISCONN;
1291 if ((res = send_msg(NULL, sock, &m, 0)) < 0) { 1392 goto exit;
1292 sock->state = SS_DISCONNECTING; 1393 }
1293 return res; 1394
1294 } 1395 /*
1295 1396 * Reject connection attempt using multicast address
1296 if (down_interruptible(&tsock->sem)) 1397 *
1297 return -ERESTARTSYS; 1398 * Note: send_msg() validates the rest of the address fields,
1298 1399 * so there's no need to do it here
1299 /* Wait for destination's 'ACK' response */ 1400 */
1300 1401
1301 res = wait_event_interruptible_timeout(*sock->sk->sk_sleep, 1402 if (dst->addrtype == TIPC_ADDR_MCAST) {
1302 skb_queue_len(&sock->sk->sk_receive_queue), 1403 res = -EINVAL;
1303 sock->sk->sk_rcvtimeo); 1404 goto exit;
1304 buf = skb_peek(&sock->sk->sk_receive_queue); 1405 }
1305 if (res > 0) { 1406
1306 msg = buf_msg(buf); 1407 /* Reject any messages already in receive queue (very unlikely) */
1307 res = auto_connect(sock, tsock, msg); 1408
1308 if (!res) { 1409 reject_rx_queue(sk);
1309 if (!msg_data_sz(msg)) 1410
1310 advance_queue(tsock); 1411 /* Send a 'SYN-' to destination */
1311 } 1412
1312 } else { 1413 m.msg_name = dest;
1313 if (res == 0) { 1414 m.msg_namelen = destlen;
1314 res = -ETIMEDOUT; 1415 res = send_msg(NULL, sock, &m, 0);
1315 } else 1416 if (res < 0) {
1316 { /* leave "res" unchanged */ } 1417 goto exit;
1317 sock->state = SS_DISCONNECTING; 1418 }
1318 } 1419
1319 1420 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
1320 up(&tsock->sem); 1421
1321 return res; 1422 release_sock(sk);
1423 res = wait_event_interruptible_timeout(*sk->sk_sleep,
1424 (!skb_queue_empty(&sk->sk_receive_queue) ||
1425 (sock->state != SS_CONNECTING)),
1426 sk->sk_rcvtimeo);
1427 lock_sock(sk);
1428
1429 if (res > 0) {
1430 buf = skb_peek(&sk->sk_receive_queue);
1431 if (buf != NULL) {
1432 msg = buf_msg(buf);
1433 res = auto_connect(sock, msg);
1434 if (!res) {
1435 if (!msg_data_sz(msg))
1436 advance_rx_queue(sk);
1437 }
1438 } else {
1439 if (sock->state == SS_CONNECTED) {
1440 res = -EISCONN;
1441 } else {
1442 res = -ECONNREFUSED;
1443 }
1444 }
1445 } else {
1446 if (res == 0)
1447 res = -ETIMEDOUT;
1448 else
1449 ; /* leave "res" unchanged */
1450 sock->state = SS_DISCONNECTING;
1451 }
1452
1453exit:
1454 release_sock(sk);
1455 return res;
1322} 1456}
1323 1457
1324/** 1458/**
@@ -1331,14 +1465,22 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1331 1465
1332static int listen(struct socket *sock, int len) 1466static int listen(struct socket *sock, int len)
1333{ 1467{
1334 /* REQUIRES SOCKET LOCKING OF SOME SORT? */ 1468 struct sock *sk = sock->sk;
1469 int res;
1470
1471 lock_sock(sk);
1335 1472
1336 if (sock->state == SS_READY) 1473 if (sock->state == SS_READY)
1337 return -EOPNOTSUPP; 1474 res = -EOPNOTSUPP;
1338 if (sock->state != SS_UNCONNECTED) 1475 else if (sock->state != SS_UNCONNECTED)
1339 return -EINVAL; 1476 res = -EINVAL;
1340 sock->state = SS_LISTENING; 1477 else {
1341 return 0; 1478 sock->state = SS_LISTENING;
1479 res = 0;
1480 }
1481
1482 release_sock(sk);
1483 return res;
1342} 1484}
1343 1485
1344/** 1486/**
@@ -1350,50 +1492,69 @@ static int listen(struct socket *sock, int len)
1350 * Returns 0 on success, errno otherwise 1492 * Returns 0 on success, errno otherwise
1351 */ 1493 */
1352 1494
1353static int accept(struct socket *sock, struct socket *newsock, int flags) 1495static int accept(struct socket *sock, struct socket *new_sock, int flags)
1354{ 1496{
1355 struct tipc_sock *tsock = tipc_sk(sock->sk); 1497 struct sock *sk = sock->sk;
1356 struct sk_buff *buf; 1498 struct sk_buff *buf;
1357 int res = -EFAULT; 1499 int res;
1358
1359 if (sock->state == SS_READY)
1360 return -EOPNOTSUPP;
1361 if (sock->state != SS_LISTENING)
1362 return -EINVAL;
1363
1364 if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
1365 (flags & O_NONBLOCK)))
1366 return -EWOULDBLOCK;
1367 1500
1368 if (down_interruptible(&tsock->sem)) 1501 lock_sock(sk);
1369 return -ERESTARTSYS;
1370 1502
1371 if (wait_event_interruptible(*sock->sk->sk_sleep, 1503 if (sock->state == SS_READY) {
1372 skb_queue_len(&sock->sk->sk_receive_queue))) { 1504 res = -EOPNOTSUPP;
1373 res = -ERESTARTSYS; 1505 goto exit;
1506 }
1507 if (sock->state != SS_LISTENING) {
1508 res = -EINVAL;
1374 goto exit; 1509 goto exit;
1375 } 1510 }
1376 buf = skb_peek(&sock->sk->sk_receive_queue);
1377 1511
1378 res = tipc_create(sock->sk->sk_net, newsock, 0); 1512 while (skb_queue_empty(&sk->sk_receive_queue)) {
1513 if (flags & O_NONBLOCK) {
1514 res = -EWOULDBLOCK;
1515 goto exit;
1516 }
1517 release_sock(sk);
1518 res = wait_event_interruptible(*sk->sk_sleep,
1519 (!skb_queue_empty(&sk->sk_receive_queue)));
1520 lock_sock(sk);
1521 if (res)
1522 goto exit;
1523 }
1524
1525 buf = skb_peek(&sk->sk_receive_queue);
1526
1527 res = tipc_create(sock_net(sock->sk), new_sock, 0);
1379 if (!res) { 1528 if (!res) {
1380 struct tipc_sock *new_tsock = tipc_sk(newsock->sk); 1529 struct sock *new_sk = new_sock->sk;
1530 struct tipc_port *new_tport = tipc_sk_port(new_sk);
1531 u32 new_ref = new_tport->ref;
1381 struct tipc_portid id; 1532 struct tipc_portid id;
1382 struct tipc_msg *msg = buf_msg(buf); 1533 struct tipc_msg *msg = buf_msg(buf);
1383 u32 new_ref = new_tsock->p->ref; 1534
1535 lock_sock(new_sk);
1536
1537 /*
1538 * Reject any stray messages received by new socket
1539 * before the socket lock was taken (very, very unlikely)
1540 */
1541
1542 reject_rx_queue(new_sk);
1543
1544 /* Connect new socket to it's peer */
1384 1545
1385 id.ref = msg_origport(msg); 1546 id.ref = msg_origport(msg);
1386 id.node = msg_orignode(msg); 1547 id.node = msg_orignode(msg);
1387 tipc_connect2port(new_ref, &id); 1548 tipc_connect2port(new_ref, &id);
1388 newsock->state = SS_CONNECTED; 1549 new_sock->state = SS_CONNECTED;
1389 1550
1390 tipc_set_portimportance(new_ref, msg_importance(msg)); 1551 tipc_set_portimportance(new_ref, msg_importance(msg));
1391 if (msg_named(msg)) { 1552 if (msg_named(msg)) {
1392 new_tsock->p->conn_type = msg_nametype(msg); 1553 new_tport->conn_type = msg_nametype(msg);
1393 new_tsock->p->conn_instance = msg_nameinst(msg); 1554 new_tport->conn_instance = msg_nameinst(msg);
1394 } 1555 }
1395 1556
1396 /* 1557 /*
1397 * Respond to 'SYN-' by discarding it & returning 'ACK'-. 1558 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
1398 * Respond to 'SYN+' by queuing it on new socket. 1559 * Respond to 'SYN+' by queuing it on new socket.
1399 */ 1560 */
@@ -1402,24 +1563,23 @@ static int accept(struct socket *sock, struct socket *newsock, int flags)
1402 if (!msg_data_sz(msg)) { 1563 if (!msg_data_sz(msg)) {
1403 struct msghdr m = {NULL,}; 1564 struct msghdr m = {NULL,};
1404 1565
1405 send_packet(NULL, newsock, &m, 0); 1566 advance_rx_queue(sk);
1406 advance_queue(tsock); 1567 send_packet(NULL, new_sock, &m, 0);
1407 } else { 1568 } else {
1408 sock_lock(tsock); 1569 __skb_dequeue(&sk->sk_receive_queue);
1409 skb_dequeue(&sock->sk->sk_receive_queue); 1570 __skb_queue_head(&new_sk->sk_receive_queue, buf);
1410 sock_unlock(tsock);
1411 skb_queue_head(&newsock->sk->sk_receive_queue, buf);
1412 } 1571 }
1572 release_sock(new_sk);
1413 } 1573 }
1414exit: 1574exit:
1415 up(&tsock->sem); 1575 release_sock(sk);
1416 return res; 1576 return res;
1417} 1577}
1418 1578
1419/** 1579/**
1420 * shutdown - shutdown socket connection 1580 * shutdown - shutdown socket connection
1421 * @sock: socket structure 1581 * @sock: socket structure
1422 * @how: direction to close (unused; always treated as read + write) 1582 * @how: direction to close (must be SHUT_RDWR)
1423 * 1583 *
1424 * Terminates connection (if necessary), then purges socket's receive queue. 1584 * Terminates connection (if necessary), then purges socket's receive queue.
1425 * 1585 *
@@ -1428,53 +1588,46 @@ exit:
1428 1588
1429static int shutdown(struct socket *sock, int how) 1589static int shutdown(struct socket *sock, int how)
1430{ 1590{
1431 struct tipc_sock* tsock = tipc_sk(sock->sk); 1591 struct sock *sk = sock->sk;
1592 struct tipc_port *tport = tipc_sk_port(sk);
1432 struct sk_buff *buf; 1593 struct sk_buff *buf;
1433 int res; 1594 int res;
1434 1595
1435 /* Could return -EINVAL for an invalid "how", but why bother? */ 1596 if (how != SHUT_RDWR)
1436 1597 return -EINVAL;
1437 if (down_interruptible(&tsock->sem))
1438 return -ERESTARTSYS;
1439 1598
1440 sock_lock(tsock); 1599 lock_sock(sk);
1441 1600
1442 switch (sock->state) { 1601 switch (sock->state) {
1602 case SS_CONNECTING:
1443 case SS_CONNECTED: 1603 case SS_CONNECTED:
1444 1604
1445 /* Send 'FIN+' or 'FIN-' message to peer */ 1605 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
1446
1447 sock_unlock(tsock);
1448restart: 1606restart:
1449 if ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) { 1607 buf = __skb_dequeue(&sk->sk_receive_queue);
1608 if (buf) {
1450 atomic_dec(&tipc_queue_size); 1609 atomic_dec(&tipc_queue_size);
1451 if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf))) { 1610 if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf))) {
1452 buf_discard(buf); 1611 buf_discard(buf);
1453 goto restart; 1612 goto restart;
1454 } 1613 }
1614 tipc_disconnect(tport->ref);
1455 tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN); 1615 tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
1616 } else {
1617 tipc_shutdown(tport->ref);
1456 } 1618 }
1457 else { 1619
1458 tipc_shutdown(tsock->p->ref); 1620 sock->state = SS_DISCONNECTING;
1459 }
1460 sock_lock(tsock);
1461 1621
1462 /* fall through */ 1622 /* fall through */
1463 1623
1464 case SS_DISCONNECTING: 1624 case SS_DISCONNECTING:
1465 1625
1466 /* Discard any unreceived messages */ 1626 /* Discard any unreceived messages; wake up sleeping tasks */
1467 1627
1468 while ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) { 1628 discard_rx_queue(sk);
1469 atomic_dec(&tipc_queue_size); 1629 if (waitqueue_active(sk->sk_sleep))
1470 buf_discard(buf); 1630 wake_up_interruptible(sk->sk_sleep);
1471 }
1472 tsock->p->conn_unacked = 0;
1473
1474 /* fall through */
1475
1476 case SS_CONNECTING:
1477 sock->state = SS_DISCONNECTING;
1478 res = 0; 1631 res = 0;
1479 break; 1632 break;
1480 1633
@@ -1482,9 +1635,7 @@ restart:
1482 res = -ENOTCONN; 1635 res = -ENOTCONN;
1483 } 1636 }
1484 1637
1485 sock_unlock(tsock); 1638 release_sock(sk);
1486
1487 up(&tsock->sem);
1488 return res; 1639 return res;
1489} 1640}
1490 1641
@@ -1505,7 +1656,8 @@ restart:
1505static int setsockopt(struct socket *sock, 1656static int setsockopt(struct socket *sock,
1506 int lvl, int opt, char __user *ov, int ol) 1657 int lvl, int opt, char __user *ov, int ol)
1507{ 1658{
1508 struct tipc_sock *tsock = tipc_sk(sock->sk); 1659 struct sock *sk = sock->sk;
1660 struct tipc_port *tport = tipc_sk_port(sk);
1509 u32 value; 1661 u32 value;
1510 int res; 1662 int res;
1511 1663
@@ -1518,30 +1670,31 @@ static int setsockopt(struct socket *sock,
1518 if ((res = get_user(value, (u32 __user *)ov))) 1670 if ((res = get_user(value, (u32 __user *)ov)))
1519 return res; 1671 return res;
1520 1672
1521 if (down_interruptible(&tsock->sem)) 1673 lock_sock(sk);
1522 return -ERESTARTSYS;
1523 1674
1524 switch (opt) { 1675 switch (opt) {
1525 case TIPC_IMPORTANCE: 1676 case TIPC_IMPORTANCE:
1526 res = tipc_set_portimportance(tsock->p->ref, value); 1677 res = tipc_set_portimportance(tport->ref, value);
1527 break; 1678 break;
1528 case TIPC_SRC_DROPPABLE: 1679 case TIPC_SRC_DROPPABLE:
1529 if (sock->type != SOCK_STREAM) 1680 if (sock->type != SOCK_STREAM)
1530 res = tipc_set_portunreliable(tsock->p->ref, value); 1681 res = tipc_set_portunreliable(tport->ref, value);
1531 else 1682 else
1532 res = -ENOPROTOOPT; 1683 res = -ENOPROTOOPT;
1533 break; 1684 break;
1534 case TIPC_DEST_DROPPABLE: 1685 case TIPC_DEST_DROPPABLE:
1535 res = tipc_set_portunreturnable(tsock->p->ref, value); 1686 res = tipc_set_portunreturnable(tport->ref, value);
1536 break; 1687 break;
1537 case TIPC_CONN_TIMEOUT: 1688 case TIPC_CONN_TIMEOUT:
1538 sock->sk->sk_rcvtimeo = (value * HZ / 1000); 1689 sk->sk_rcvtimeo = msecs_to_jiffies(value);
1690 /* no need to set "res", since already 0 at this point */
1539 break; 1691 break;
1540 default: 1692 default:
1541 res = -EINVAL; 1693 res = -EINVAL;
1542 } 1694 }
1543 1695
1544 up(&tsock->sem); 1696 release_sock(sk);
1697
1545 return res; 1698 return res;
1546} 1699}
1547 1700
@@ -1562,7 +1715,8 @@ static int setsockopt(struct socket *sock,
1562static int getsockopt(struct socket *sock, 1715static int getsockopt(struct socket *sock,
1563 int lvl, int opt, char __user *ov, int __user *ol) 1716 int lvl, int opt, char __user *ov, int __user *ol)
1564{ 1717{
1565 struct tipc_sock *tsock = tipc_sk(sock->sk); 1718 struct sock *sk = sock->sk;
1719 struct tipc_port *tport = tipc_sk_port(sk);
1566 int len; 1720 int len;
1567 u32 value; 1721 u32 value;
1568 int res; 1722 int res;
@@ -1574,26 +1728,28 @@ static int getsockopt(struct socket *sock,
1574 if ((res = get_user(len, ol))) 1728 if ((res = get_user(len, ol)))
1575 return res; 1729 return res;
1576 1730
1577 if (down_interruptible(&tsock->sem)) 1731 lock_sock(sk);
1578 return -ERESTARTSYS;
1579 1732
1580 switch (opt) { 1733 switch (opt) {
1581 case TIPC_IMPORTANCE: 1734 case TIPC_IMPORTANCE:
1582 res = tipc_portimportance(tsock->p->ref, &value); 1735 res = tipc_portimportance(tport->ref, &value);
1583 break; 1736 break;
1584 case TIPC_SRC_DROPPABLE: 1737 case TIPC_SRC_DROPPABLE:
1585 res = tipc_portunreliable(tsock->p->ref, &value); 1738 res = tipc_portunreliable(tport->ref, &value);
1586 break; 1739 break;
1587 case TIPC_DEST_DROPPABLE: 1740 case TIPC_DEST_DROPPABLE:
1588 res = tipc_portunreturnable(tsock->p->ref, &value); 1741 res = tipc_portunreturnable(tport->ref, &value);
1589 break; 1742 break;
1590 case TIPC_CONN_TIMEOUT: 1743 case TIPC_CONN_TIMEOUT:
1591 value = (sock->sk->sk_rcvtimeo * 1000) / HZ; 1744 value = jiffies_to_msecs(sk->sk_rcvtimeo);
1745 /* no need to set "res", since already 0 at this point */
1592 break; 1746 break;
1593 default: 1747 default:
1594 res = -EINVAL; 1748 res = -EINVAL;
1595 } 1749 }
1596 1750
1751 release_sock(sk);
1752
1597 if (res) { 1753 if (res) {
1598 /* "get" failed */ 1754 /* "get" failed */
1599 } 1755 }
@@ -1607,7 +1763,6 @@ static int getsockopt(struct socket *sock,
1607 res = put_user(sizeof(value), ol); 1763 res = put_user(sizeof(value), ol);
1608 } 1764 }
1609 1765
1610 up(&tsock->sem);
1611 return res; 1766 return res;
1612} 1767}
1613 1768
@@ -1720,6 +1875,7 @@ int tipc_socket_init(void)
1720/** 1875/**
1721 * tipc_socket_stop - stop TIPC socket interface 1876 * tipc_socket_stop - stop TIPC socket interface
1722 */ 1877 */
1878
1723void tipc_socket_stop(void) 1879void tipc_socket_stop(void)
1724{ 1880{
1725 if (!sockets_enabled) 1881 if (!sockets_enabled)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index b8788fd5e3c6..2851d0d15048 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -252,7 +252,7 @@ static struct sock *__unix_find_socket_byname(struct net *net,
252 sk_for_each(s, node, &unix_socket_table[hash ^ type]) { 252 sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
253 struct unix_sock *u = unix_sk(s); 253 struct unix_sock *u = unix_sk(s);
254 254
255 if (s->sk_net != net) 255 if (!net_eq(sock_net(s), net))
256 continue; 256 continue;
257 257
258 if (u->addr->len == len && 258 if (u->addr->len == len &&
@@ -289,7 +289,7 @@ static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
289 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) { 289 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
290 struct dentry *dentry = unix_sk(s)->dentry; 290 struct dentry *dentry = unix_sk(s)->dentry;
291 291
292 if (s->sk_net != net) 292 if (!net_eq(sock_net(s), net))
293 continue; 293 continue;
294 294
295 if(dentry && dentry->d_inode == i) 295 if(dentry && dentry->d_inode == i)
@@ -654,7 +654,7 @@ static int unix_release(struct socket *sock)
654static int unix_autobind(struct socket *sock) 654static int unix_autobind(struct socket *sock)
655{ 655{
656 struct sock *sk = sock->sk; 656 struct sock *sk = sock->sk;
657 struct net *net = sk->sk_net; 657 struct net *net = sock_net(sk);
658 struct unix_sock *u = unix_sk(sk); 658 struct unix_sock *u = unix_sk(sk);
659 static u32 ordernum = 1; 659 static u32 ordernum = 1;
660 struct unix_address * addr; 660 struct unix_address * addr;
@@ -758,7 +758,7 @@ fail:
758static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 758static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
759{ 759{
760 struct sock *sk = sock->sk; 760 struct sock *sk = sock->sk;
761 struct net *net = sk->sk_net; 761 struct net *net = sock_net(sk);
762 struct unix_sock *u = unix_sk(sk); 762 struct unix_sock *u = unix_sk(sk);
763 struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr; 763 struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
764 struct dentry * dentry = NULL; 764 struct dentry * dentry = NULL;
@@ -899,7 +899,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
899 int alen, int flags) 899 int alen, int flags)
900{ 900{
901 struct sock *sk = sock->sk; 901 struct sock *sk = sock->sk;
902 struct net *net = sk->sk_net; 902 struct net *net = sock_net(sk);
903 struct sockaddr_un *sunaddr=(struct sockaddr_un*)addr; 903 struct sockaddr_un *sunaddr=(struct sockaddr_un*)addr;
904 struct sock *other; 904 struct sock *other;
905 unsigned hash; 905 unsigned hash;
@@ -996,7 +996,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
996{ 996{
997 struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr; 997 struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
998 struct sock *sk = sock->sk; 998 struct sock *sk = sock->sk;
999 struct net *net = sk->sk_net; 999 struct net *net = sock_net(sk);
1000 struct unix_sock *u = unix_sk(sk), *newu, *otheru; 1000 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1001 struct sock *newsk = NULL; 1001 struct sock *newsk = NULL;
1002 struct sock *other = NULL; 1002 struct sock *other = NULL;
@@ -1025,7 +1025,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1025 err = -ENOMEM; 1025 err = -ENOMEM;
1026 1026
1027 /* create new sock for complete connection */ 1027 /* create new sock for complete connection */
1028 newsk = unix_create1(sk->sk_net, NULL); 1028 newsk = unix_create1(sock_net(sk), NULL);
1029 if (newsk == NULL) 1029 if (newsk == NULL)
1030 goto out; 1030 goto out;
1031 1031
@@ -1312,7 +1312,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1312{ 1312{
1313 struct sock_iocb *siocb = kiocb_to_siocb(kiocb); 1313 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1314 struct sock *sk = sock->sk; 1314 struct sock *sk = sock->sk;
1315 struct net *net = sk->sk_net; 1315 struct net *net = sock_net(sk);
1316 struct unix_sock *u = unix_sk(sk); 1316 struct unix_sock *u = unix_sk(sk);
1317 struct sockaddr_un *sunaddr=msg->msg_name; 1317 struct sockaddr_un *sunaddr=msg->msg_name;
1318 struct sock *other = NULL; 1318 struct sock *other = NULL;
@@ -2016,13 +2016,14 @@ struct unix_iter_state {
2016 struct seq_net_private p; 2016 struct seq_net_private p;
2017 int i; 2017 int i;
2018}; 2018};
2019static struct sock *unix_seq_idx(struct unix_iter_state *iter, loff_t pos) 2019static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
2020{ 2020{
2021 struct unix_iter_state *iter = seq->private;
2021 loff_t off = 0; 2022 loff_t off = 0;
2022 struct sock *s; 2023 struct sock *s;
2023 2024
2024 for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) { 2025 for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
2025 if (s->sk_net != iter->p.net) 2026 if (sock_net(s) != seq_file_net(seq))
2026 continue; 2027 continue;
2027 if (off == pos) 2028 if (off == pos)
2028 return s; 2029 return s;
@@ -2035,9 +2036,8 @@ static struct sock *unix_seq_idx(struct unix_iter_state *iter, loff_t pos)
2035static void *unix_seq_start(struct seq_file *seq, loff_t *pos) 2036static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2036 __acquires(unix_table_lock) 2037 __acquires(unix_table_lock)
2037{ 2038{
2038 struct unix_iter_state *iter = seq->private;
2039 spin_lock(&unix_table_lock); 2039 spin_lock(&unix_table_lock);
2040 return *pos ? unix_seq_idx(iter, *pos - 1) : ((void *) 1); 2040 return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2041} 2041}
2042 2042
2043static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2043static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
@@ -2046,11 +2046,11 @@ static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2046 struct sock *sk = v; 2046 struct sock *sk = v;
2047 ++*pos; 2047 ++*pos;
2048 2048
2049 if (v == (void *)1) 2049 if (v == SEQ_START_TOKEN)
2050 sk = first_unix_socket(&iter->i); 2050 sk = first_unix_socket(&iter->i);
2051 else 2051 else
2052 sk = next_unix_socket(&iter->i, sk); 2052 sk = next_unix_socket(&iter->i, sk);
2053 while (sk && (sk->sk_net != iter->p.net)) 2053 while (sk && (sock_net(sk) != seq_file_net(seq)))
2054 sk = next_unix_socket(&iter->i, sk); 2054 sk = next_unix_socket(&iter->i, sk);
2055 return sk; 2055 return sk;
2056} 2056}
@@ -2064,7 +2064,7 @@ static void unix_seq_stop(struct seq_file *seq, void *v)
2064static int unix_seq_show(struct seq_file *seq, void *v) 2064static int unix_seq_show(struct seq_file *seq, void *v)
2065{ 2065{
2066 2066
2067 if (v == (void *)1) 2067 if (v == SEQ_START_TOKEN)
2068 seq_puts(seq, "Num RefCount Protocol Flags Type St " 2068 seq_puts(seq, "Num RefCount Protocol Flags Type St "
2069 "Inode Path\n"); 2069 "Inode Path\n");
2070 else { 2070 else {
@@ -2176,7 +2176,7 @@ static int __init af_unix_init(void)
2176 rc = proto_register(&unix_proto, 1); 2176 rc = proto_register(&unix_proto, 1);
2177 if (rc != 0) { 2177 if (rc != 0) {
2178 printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n", 2178 printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
2179 __FUNCTION__); 2179 __func__);
2180 goto out; 2180 goto out;
2181 } 2181 }
2182 2182
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 65710a42e5a7..b9f943c45f3b 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -1,5 +1,5 @@
1obj-$(CONFIG_WIRELESS_EXT) += wext.o 1obj-$(CONFIG_WIRELESS_EXT) += wext.o
2obj-$(CONFIG_CFG80211) += cfg80211.o 2obj-$(CONFIG_CFG80211) += cfg80211.o
3 3
4cfg80211-y += core.o sysfs.o radiotap.o 4cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o
5cfg80211-$(CONFIG_NL80211) += nl80211.o 5cfg80211-$(CONFIG_NL80211) += nl80211.o
diff --git a/net/wireless/core.c b/net/wireless/core.c
index cfc5fc5f9e75..80afacdae46c 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -232,6 +232,47 @@ int wiphy_register(struct wiphy *wiphy)
232{ 232{
233 struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy); 233 struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy);
234 int res; 234 int res;
235 enum ieee80211_band band;
236 struct ieee80211_supported_band *sband;
237 bool have_band = false;
238 int i;
239
240 /* sanity check supported bands/channels */
241 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
242 sband = wiphy->bands[band];
243 if (!sband)
244 continue;
245
246 sband->band = band;
247
248 if (!sband->n_channels || !sband->n_bitrates) {
249 WARN_ON(1);
250 return -EINVAL;
251 }
252
253 for (i = 0; i < sband->n_channels; i++) {
254 sband->channels[i].orig_flags =
255 sband->channels[i].flags;
256 sband->channels[i].orig_mag =
257 sband->channels[i].max_antenna_gain;
258 sband->channels[i].orig_mpwr =
259 sband->channels[i].max_power;
260 sband->channels[i].band = band;
261 }
262
263 have_band = true;
264 }
265
266 if (!have_band) {
267 WARN_ON(1);
268 return -EINVAL;
269 }
270
271 /* check and set up bitrates */
272 ieee80211_set_bitrate_flags(wiphy);
273
274 /* set up regulatory info */
275 wiphy_update_regulatory(wiphy);
235 276
236 mutex_lock(&cfg80211_drv_mutex); 277 mutex_lock(&cfg80211_drv_mutex);
237 278
diff --git a/net/wireless/core.h b/net/wireless/core.h
index eb0f846b40df..7a02c356d63d 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -78,4 +78,7 @@ extern void cfg80211_dev_free(struct cfg80211_registered_device *drv);
78extern int cfg80211_dev_rename(struct cfg80211_registered_device *drv, 78extern int cfg80211_dev_rename(struct cfg80211_registered_device *drv,
79 char *newname); 79 char *newname);
80 80
81void ieee80211_set_bitrate_flags(struct wiphy *wiphy);
82void wiphy_update_regulatory(struct wiphy *wiphy);
83
81#endif /* __NET_WIRELESS_CORE_H */ 84#endif /* __NET_WIRELESS_CORE_H */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index f68a5c8f2147..2bdd4dddc0e1 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -81,7 +81,12 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
81 [NL80211_ATTR_STA_LISTEN_INTERVAL] = { .type = NLA_U16 }, 81 [NL80211_ATTR_STA_LISTEN_INTERVAL] = { .type = NLA_U16 },
82 [NL80211_ATTR_STA_SUPPORTED_RATES] = { .type = NLA_BINARY, 82 [NL80211_ATTR_STA_SUPPORTED_RATES] = { .type = NLA_BINARY,
83 .len = NL80211_MAX_SUPP_RATES }, 83 .len = NL80211_MAX_SUPP_RATES },
84 [NL80211_ATTR_STA_PLINK_ACTION] = { .type = NLA_U8 },
84 [NL80211_ATTR_STA_VLAN] = { .type = NLA_U32 }, 85 [NL80211_ATTR_STA_VLAN] = { .type = NLA_U32 },
86 [NL80211_ATTR_MNTR_FLAGS] = { .type = NLA_NESTED },
87 [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
88 .len = IEEE80211_MAX_MESH_ID_LEN },
89 [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 },
85}; 90};
86 91
87/* message building helper */ 92/* message building helper */
@@ -98,6 +103,13 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
98 struct cfg80211_registered_device *dev) 103 struct cfg80211_registered_device *dev)
99{ 104{
100 void *hdr; 105 void *hdr;
106 struct nlattr *nl_bands, *nl_band;
107 struct nlattr *nl_freqs, *nl_freq;
108 struct nlattr *nl_rates, *nl_rate;
109 enum ieee80211_band band;
110 struct ieee80211_channel *chan;
111 struct ieee80211_rate *rate;
112 int i;
101 113
102 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY); 114 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY);
103 if (!hdr) 115 if (!hdr)
@@ -105,6 +117,73 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
105 117
106 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx); 118 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx);
107 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); 119 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy));
120
121 nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS);
122 if (!nl_bands)
123 goto nla_put_failure;
124
125 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
126 if (!dev->wiphy.bands[band])
127 continue;
128
129 nl_band = nla_nest_start(msg, band);
130 if (!nl_band)
131 goto nla_put_failure;
132
133 /* add frequencies */
134 nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS);
135 if (!nl_freqs)
136 goto nla_put_failure;
137
138 for (i = 0; i < dev->wiphy.bands[band]->n_channels; i++) {
139 nl_freq = nla_nest_start(msg, i);
140 if (!nl_freq)
141 goto nla_put_failure;
142
143 chan = &dev->wiphy.bands[band]->channels[i];
144 NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_FREQ,
145 chan->center_freq);
146
147 if (chan->flags & IEEE80211_CHAN_DISABLED)
148 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_DISABLED);
149 if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
150 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN);
151 if (chan->flags & IEEE80211_CHAN_NO_IBSS)
152 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_NO_IBSS);
153 if (chan->flags & IEEE80211_CHAN_RADAR)
154 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_RADAR);
155
156 nla_nest_end(msg, nl_freq);
157 }
158
159 nla_nest_end(msg, nl_freqs);
160
161 /* add bitrates */
162 nl_rates = nla_nest_start(msg, NL80211_BAND_ATTR_RATES);
163 if (!nl_rates)
164 goto nla_put_failure;
165
166 for (i = 0; i < dev->wiphy.bands[band]->n_bitrates; i++) {
167 nl_rate = nla_nest_start(msg, i);
168 if (!nl_rate)
169 goto nla_put_failure;
170
171 rate = &dev->wiphy.bands[band]->bitrates[i];
172 NLA_PUT_U32(msg, NL80211_BITRATE_ATTR_RATE,
173 rate->bitrate);
174 if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)
175 NLA_PUT_FLAG(msg,
176 NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE);
177
178 nla_nest_end(msg, nl_rate);
179 }
180
181 nla_nest_end(msg, nl_rates);
182
183 nla_nest_end(msg, nl_band);
184 }
185 nla_nest_end(msg, nl_bands);
186
108 return genlmsg_end(msg, hdr); 187 return genlmsg_end(msg, hdr);
109 188
110 nla_put_failure: 189 nla_put_failure:
@@ -262,12 +341,45 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
262 return -ENOBUFS; 341 return -ENOBUFS;
263} 342}
264 343
344static const struct nla_policy mntr_flags_policy[NL80211_MNTR_FLAG_MAX + 1] = {
345 [NL80211_MNTR_FLAG_FCSFAIL] = { .type = NLA_FLAG },
346 [NL80211_MNTR_FLAG_PLCPFAIL] = { .type = NLA_FLAG },
347 [NL80211_MNTR_FLAG_CONTROL] = { .type = NLA_FLAG },
348 [NL80211_MNTR_FLAG_OTHER_BSS] = { .type = NLA_FLAG },
349 [NL80211_MNTR_FLAG_COOK_FRAMES] = { .type = NLA_FLAG },
350};
351
352static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags)
353{
354 struct nlattr *flags[NL80211_MNTR_FLAG_MAX + 1];
355 int flag;
356
357 *mntrflags = 0;
358
359 if (!nla)
360 return -EINVAL;
361
362 if (nla_parse_nested(flags, NL80211_MNTR_FLAG_MAX,
363 nla, mntr_flags_policy))
364 return -EINVAL;
365
366 for (flag = 1; flag <= NL80211_MNTR_FLAG_MAX; flag++)
367 if (flags[flag])
368 *mntrflags |= (1<<flag);
369
370 return 0;
371}
372
265static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) 373static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
266{ 374{
267 struct cfg80211_registered_device *drv; 375 struct cfg80211_registered_device *drv;
376 struct vif_params params;
268 int err, ifindex; 377 int err, ifindex;
269 enum nl80211_iftype type; 378 enum nl80211_iftype type;
270 struct net_device *dev; 379 struct net_device *dev;
380 u32 flags;
381
382 memset(&params, 0, sizeof(params));
271 383
272 if (info->attrs[NL80211_ATTR_IFTYPE]) { 384 if (info->attrs[NL80211_ATTR_IFTYPE]) {
273 type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); 385 type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
@@ -287,8 +399,18 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
287 goto unlock; 399 goto unlock;
288 } 400 }
289 401
402 if (type == NL80211_IFTYPE_MESH_POINT &&
403 info->attrs[NL80211_ATTR_MESH_ID]) {
404 params.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]);
405 params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
406 }
407
290 rtnl_lock(); 408 rtnl_lock();
291 err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex, type); 409 err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
410 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
411 &flags);
412 err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex,
413 type, err ? NULL : &flags, &params);
292 rtnl_unlock(); 414 rtnl_unlock();
293 415
294 unlock: 416 unlock:
@@ -299,8 +421,12 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
299static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) 421static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
300{ 422{
301 struct cfg80211_registered_device *drv; 423 struct cfg80211_registered_device *drv;
424 struct vif_params params;
302 int err; 425 int err;
303 enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED; 426 enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
427 u32 flags;
428
429 memset(&params, 0, sizeof(params));
304 430
305 if (!info->attrs[NL80211_ATTR_IFNAME]) 431 if (!info->attrs[NL80211_ATTR_IFNAME])
306 return -EINVAL; 432 return -EINVAL;
@@ -320,11 +446,22 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
320 goto unlock; 446 goto unlock;
321 } 447 }
322 448
449 if (type == NL80211_IFTYPE_MESH_POINT &&
450 info->attrs[NL80211_ATTR_MESH_ID]) {
451 params.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]);
452 params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
453 }
454
323 rtnl_lock(); 455 rtnl_lock();
456 err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
457 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
458 &flags);
324 err = drv->ops->add_virtual_intf(&drv->wiphy, 459 err = drv->ops->add_virtual_intf(&drv->wiphy,
325 nla_data(info->attrs[NL80211_ATTR_IFNAME]), type); 460 nla_data(info->attrs[NL80211_ATTR_IFNAME]),
461 type, err ? NULL : &flags, &params);
326 rtnl_unlock(); 462 rtnl_unlock();
327 463
464
328 unlock: 465 unlock:
329 cfg80211_put_dev(drv); 466 cfg80211_put_dev(drv);
330 return err; 467 return err;
@@ -752,10 +889,10 @@ static int parse_station_flags(struct nlattr *nla, u32 *staflags)
752 889
753static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, 890static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
754 int flags, struct net_device *dev, 891 int flags, struct net_device *dev,
755 u8 *mac_addr, struct station_stats *stats) 892 u8 *mac_addr, struct station_info *sinfo)
756{ 893{
757 void *hdr; 894 void *hdr;
758 struct nlattr *statsattr; 895 struct nlattr *sinfoattr;
759 896
760 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION); 897 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION);
761 if (!hdr) 898 if (!hdr)
@@ -764,20 +901,29 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
764 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 901 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
765 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); 902 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
766 903
767 statsattr = nla_nest_start(msg, NL80211_ATTR_STA_STATS); 904 sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO);
768 if (!statsattr) 905 if (!sinfoattr)
769 goto nla_put_failure; 906 goto nla_put_failure;
770 if (stats->filled & STATION_STAT_INACTIVE_TIME) 907 if (sinfo->filled & STATION_INFO_INACTIVE_TIME)
771 NLA_PUT_U32(msg, NL80211_STA_STAT_INACTIVE_TIME, 908 NLA_PUT_U32(msg, NL80211_STA_INFO_INACTIVE_TIME,
772 stats->inactive_time); 909 sinfo->inactive_time);
773 if (stats->filled & STATION_STAT_RX_BYTES) 910 if (sinfo->filled & STATION_INFO_RX_BYTES)
774 NLA_PUT_U32(msg, NL80211_STA_STAT_RX_BYTES, 911 NLA_PUT_U32(msg, NL80211_STA_INFO_RX_BYTES,
775 stats->rx_bytes); 912 sinfo->rx_bytes);
776 if (stats->filled & STATION_STAT_TX_BYTES) 913 if (sinfo->filled & STATION_INFO_TX_BYTES)
777 NLA_PUT_U32(msg, NL80211_STA_STAT_TX_BYTES, 914 NLA_PUT_U32(msg, NL80211_STA_INFO_TX_BYTES,
778 stats->tx_bytes); 915 sinfo->tx_bytes);
779 916 if (sinfo->filled & STATION_INFO_LLID)
780 nla_nest_end(msg, statsattr); 917 NLA_PUT_U16(msg, NL80211_STA_INFO_LLID,
918 sinfo->llid);
919 if (sinfo->filled & STATION_INFO_PLID)
920 NLA_PUT_U16(msg, NL80211_STA_INFO_PLID,
921 sinfo->plid);
922 if (sinfo->filled & STATION_INFO_PLINK_STATE)
923 NLA_PUT_U8(msg, NL80211_STA_INFO_PLINK_STATE,
924 sinfo->plink_state);
925
926 nla_nest_end(msg, sinfoattr);
781 927
782 return genlmsg_end(msg, hdr); 928 return genlmsg_end(msg, hdr);
783 929
@@ -785,17 +931,80 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
785 return genlmsg_cancel(msg, hdr); 931 return genlmsg_cancel(msg, hdr);
786} 932}
787 933
934static int nl80211_dump_station(struct sk_buff *skb,
935 struct netlink_callback *cb)
936{
937 int wp_idx = 0;
938 int if_idx = 0;
939 int sta_idx = cb->args[2];
940 int wp_start = cb->args[0];
941 int if_start = cb->args[1];
942 struct station_info sinfo;
943 struct cfg80211_registered_device *dev;
944 struct wireless_dev *wdev;
945 u8 mac_addr[ETH_ALEN];
946 int err;
947 int exit = 0;
948
949 /* TODO: filter by device */
950 mutex_lock(&cfg80211_drv_mutex);
951 list_for_each_entry(dev, &cfg80211_drv_list, list) {
952 if (exit)
953 break;
954 if (++wp_idx < wp_start)
955 continue;
956 if_idx = 0;
957
958 mutex_lock(&dev->devlist_mtx);
959 list_for_each_entry(wdev, &dev->netdev_list, list) {
960 if (exit)
961 break;
962 if (++if_idx < if_start)
963 continue;
964 if (!dev->ops->dump_station)
965 continue;
966
967 for (;; ++sta_idx) {
968 rtnl_lock();
969 err = dev->ops->dump_station(&dev->wiphy,
970 wdev->netdev, sta_idx, mac_addr,
971 &sinfo);
972 rtnl_unlock();
973 if (err) {
974 sta_idx = 0;
975 break;
976 }
977 if (nl80211_send_station(skb,
978 NETLINK_CB(cb->skb).pid,
979 cb->nlh->nlmsg_seq, NLM_F_MULTI,
980 wdev->netdev, mac_addr,
981 &sinfo) < 0) {
982 exit = 1;
983 break;
984 }
985 }
986 }
987 mutex_unlock(&dev->devlist_mtx);
988 }
989 mutex_unlock(&cfg80211_drv_mutex);
990
991 cb->args[0] = wp_idx;
992 cb->args[1] = if_idx;
993 cb->args[2] = sta_idx;
994
995 return skb->len;
996}
788 997
789static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) 998static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
790{ 999{
791 struct cfg80211_registered_device *drv; 1000 struct cfg80211_registered_device *drv;
792 int err; 1001 int err;
793 struct net_device *dev; 1002 struct net_device *dev;
794 struct station_stats stats; 1003 struct station_info sinfo;
795 struct sk_buff *msg; 1004 struct sk_buff *msg;
796 u8 *mac_addr = NULL; 1005 u8 *mac_addr = NULL;
797 1006
798 memset(&stats, 0, sizeof(stats)); 1007 memset(&sinfo, 0, sizeof(sinfo));
799 1008
800 if (!info->attrs[NL80211_ATTR_MAC]) 1009 if (!info->attrs[NL80211_ATTR_MAC])
801 return -EINVAL; 1010 return -EINVAL;
@@ -812,15 +1021,18 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
812 } 1021 }
813 1022
814 rtnl_lock(); 1023 rtnl_lock();
815 err = drv->ops->get_station(&drv->wiphy, dev, mac_addr, &stats); 1024 err = drv->ops->get_station(&drv->wiphy, dev, mac_addr, &sinfo);
816 rtnl_unlock(); 1025 rtnl_unlock();
817 1026
1027 if (err)
1028 goto out;
1029
818 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1030 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
819 if (!msg) 1031 if (!msg)
820 goto out; 1032 goto out;
821 1033
822 if (nl80211_send_station(msg, info->snd_pid, info->snd_seq, 0, 1034 if (nl80211_send_station(msg, info->snd_pid, info->snd_seq, 0,
823 dev, mac_addr, &stats) < 0) 1035 dev, mac_addr, &sinfo) < 0)
824 goto out_free; 1036 goto out_free;
825 1037
826 err = genlmsg_unicast(msg, info->snd_pid); 1038 err = genlmsg_unicast(msg, info->snd_pid);
@@ -891,6 +1103,10 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
891 &params.station_flags)) 1103 &params.station_flags))
892 return -EINVAL; 1104 return -EINVAL;
893 1105
1106 if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION])
1107 params.plink_action =
1108 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
1109
894 err = get_drv_dev_by_info_ifindex(info, &drv, &dev); 1110 err = get_drv_dev_by_info_ifindex(info, &drv, &dev);
895 if (err) 1111 if (err)
896 return err; 1112 return err;
@@ -1005,6 +1221,273 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
1005 return err; 1221 return err;
1006} 1222}
1007 1223
1224static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq,
1225 int flags, struct net_device *dev,
1226 u8 *dst, u8 *next_hop,
1227 struct mpath_info *pinfo)
1228{
1229 void *hdr;
1230 struct nlattr *pinfoattr;
1231
1232 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION);
1233 if (!hdr)
1234 return -1;
1235
1236 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
1237 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, dst);
1238 NLA_PUT(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop);
1239
1240 pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO);
1241 if (!pinfoattr)
1242 goto nla_put_failure;
1243 if (pinfo->filled & MPATH_INFO_FRAME_QLEN)
1244 NLA_PUT_U32(msg, NL80211_MPATH_INFO_FRAME_QLEN,
1245 pinfo->frame_qlen);
1246 if (pinfo->filled & MPATH_INFO_DSN)
1247 NLA_PUT_U32(msg, NL80211_MPATH_INFO_DSN,
1248 pinfo->dsn);
1249 if (pinfo->filled & MPATH_INFO_METRIC)
1250 NLA_PUT_U32(msg, NL80211_MPATH_INFO_METRIC,
1251 pinfo->metric);
1252 if (pinfo->filled & MPATH_INFO_EXPTIME)
1253 NLA_PUT_U32(msg, NL80211_MPATH_INFO_EXPTIME,
1254 pinfo->exptime);
1255 if (pinfo->filled & MPATH_INFO_FLAGS)
1256 NLA_PUT_U8(msg, NL80211_MPATH_INFO_FLAGS,
1257 pinfo->flags);
1258 if (pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT)
1259 NLA_PUT_U32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT,
1260 pinfo->discovery_timeout);
1261 if (pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES)
1262 NLA_PUT_U8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES,
1263 pinfo->discovery_retries);
1264
1265 nla_nest_end(msg, pinfoattr);
1266
1267 return genlmsg_end(msg, hdr);
1268
1269 nla_put_failure:
1270 return genlmsg_cancel(msg, hdr);
1271}
1272
1273static int nl80211_dump_mpath(struct sk_buff *skb,
1274 struct netlink_callback *cb)
1275{
1276 int wp_idx = 0;
1277 int if_idx = 0;
1278 int sta_idx = cb->args[2];
1279 int wp_start = cb->args[0];
1280 int if_start = cb->args[1];
1281 struct mpath_info pinfo;
1282 struct cfg80211_registered_device *dev;
1283 struct wireless_dev *wdev;
1284 u8 dst[ETH_ALEN];
1285 u8 next_hop[ETH_ALEN];
1286 int err;
1287 int exit = 0;
1288
1289 /* TODO: filter by device */
1290 mutex_lock(&cfg80211_drv_mutex);
1291 list_for_each_entry(dev, &cfg80211_drv_list, list) {
1292 if (exit)
1293 break;
1294 if (++wp_idx < wp_start)
1295 continue;
1296 if_idx = 0;
1297
1298 mutex_lock(&dev->devlist_mtx);
1299 list_for_each_entry(wdev, &dev->netdev_list, list) {
1300 if (exit)
1301 break;
1302 if (++if_idx < if_start)
1303 continue;
1304 if (!dev->ops->dump_mpath)
1305 continue;
1306
1307 for (;; ++sta_idx) {
1308 rtnl_lock();
1309 err = dev->ops->dump_mpath(&dev->wiphy,
1310 wdev->netdev, sta_idx, dst,
1311 next_hop, &pinfo);
1312 rtnl_unlock();
1313 if (err) {
1314 sta_idx = 0;
1315 break;
1316 }
1317 if (nl80211_send_mpath(skb,
1318 NETLINK_CB(cb->skb).pid,
1319 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1320 wdev->netdev, dst, next_hop,
1321 &pinfo) < 0) {
1322 exit = 1;
1323 break;
1324 }
1325 }
1326 }
1327 mutex_unlock(&dev->devlist_mtx);
1328 }
1329 mutex_unlock(&cfg80211_drv_mutex);
1330
1331 cb->args[0] = wp_idx;
1332 cb->args[1] = if_idx;
1333 cb->args[2] = sta_idx;
1334
1335 return skb->len;
1336}
1337
1338static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info)
1339{
1340 struct cfg80211_registered_device *drv;
1341 int err;
1342 struct net_device *dev;
1343 struct mpath_info pinfo;
1344 struct sk_buff *msg;
1345 u8 *dst = NULL;
1346 u8 next_hop[ETH_ALEN];
1347
1348 memset(&pinfo, 0, sizeof(pinfo));
1349
1350 if (!info->attrs[NL80211_ATTR_MAC])
1351 return -EINVAL;
1352
1353 dst = nla_data(info->attrs[NL80211_ATTR_MAC]);
1354
1355 err = get_drv_dev_by_info_ifindex(info, &drv, &dev);
1356 if (err)
1357 return err;
1358
1359 if (!drv->ops->get_mpath) {
1360 err = -EOPNOTSUPP;
1361 goto out;
1362 }
1363
1364 rtnl_lock();
1365 err = drv->ops->get_mpath(&drv->wiphy, dev, dst, next_hop, &pinfo);
1366 rtnl_unlock();
1367
1368 if (err)
1369 goto out;
1370
1371 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1372 if (!msg)
1373 goto out;
1374
1375 if (nl80211_send_mpath(msg, info->snd_pid, info->snd_seq, 0,
1376 dev, dst, next_hop, &pinfo) < 0)
1377 goto out_free;
1378
1379 err = genlmsg_unicast(msg, info->snd_pid);
1380 goto out;
1381
1382 out_free:
1383 nlmsg_free(msg);
1384
1385 out:
1386 cfg80211_put_dev(drv);
1387 dev_put(dev);
1388 return err;
1389}
1390
1391static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info)
1392{
1393 struct cfg80211_registered_device *drv;
1394 int err;
1395 struct net_device *dev;
1396 u8 *dst = NULL;
1397 u8 *next_hop = NULL;
1398
1399 if (!info->attrs[NL80211_ATTR_MAC])
1400 return -EINVAL;
1401
1402 if (!info->attrs[NL80211_ATTR_MPATH_NEXT_HOP])
1403 return -EINVAL;
1404
1405 dst = nla_data(info->attrs[NL80211_ATTR_MAC]);
1406 next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]);
1407
1408 err = get_drv_dev_by_info_ifindex(info, &drv, &dev);
1409 if (err)
1410 return err;
1411
1412 if (!drv->ops->change_mpath) {
1413 err = -EOPNOTSUPP;
1414 goto out;
1415 }
1416
1417 rtnl_lock();
1418 err = drv->ops->change_mpath(&drv->wiphy, dev, dst, next_hop);
1419 rtnl_unlock();
1420
1421 out:
1422 cfg80211_put_dev(drv);
1423 dev_put(dev);
1424 return err;
1425}
1426static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info)
1427{
1428 struct cfg80211_registered_device *drv;
1429 int err;
1430 struct net_device *dev;
1431 u8 *dst = NULL;
1432 u8 *next_hop = NULL;
1433
1434 if (!info->attrs[NL80211_ATTR_MAC])
1435 return -EINVAL;
1436
1437 if (!info->attrs[NL80211_ATTR_MPATH_NEXT_HOP])
1438 return -EINVAL;
1439
1440 dst = nla_data(info->attrs[NL80211_ATTR_MAC]);
1441 next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]);
1442
1443 err = get_drv_dev_by_info_ifindex(info, &drv, &dev);
1444 if (err)
1445 return err;
1446
1447 if (!drv->ops->add_mpath) {
1448 err = -EOPNOTSUPP;
1449 goto out;
1450 }
1451
1452 rtnl_lock();
1453 err = drv->ops->add_mpath(&drv->wiphy, dev, dst, next_hop);
1454 rtnl_unlock();
1455
1456 out:
1457 cfg80211_put_dev(drv);
1458 dev_put(dev);
1459 return err;
1460}
1461
1462static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
1463{
1464 struct cfg80211_registered_device *drv;
1465 int err;
1466 struct net_device *dev;
1467 u8 *dst = NULL;
1468
1469 if (info->attrs[NL80211_ATTR_MAC])
1470 dst = nla_data(info->attrs[NL80211_ATTR_MAC]);
1471
1472 err = get_drv_dev_by_info_ifindex(info, &drv, &dev);
1473 if (err)
1474 return err;
1475
1476 if (!drv->ops->del_mpath) {
1477 err = -EOPNOTSUPP;
1478 goto out;
1479 }
1480
1481 rtnl_lock();
1482 err = drv->ops->del_mpath(&drv->wiphy, dev, dst);
1483 rtnl_unlock();
1484
1485 out:
1486 cfg80211_put_dev(drv);
1487 dev_put(dev);
1488 return err;
1489}
1490
1008static struct genl_ops nl80211_ops[] = { 1491static struct genl_ops nl80211_ops[] = {
1009 { 1492 {
1010 .cmd = NL80211_CMD_GET_WIPHY, 1493 .cmd = NL80211_CMD_GET_WIPHY,
@@ -1089,7 +1572,7 @@ static struct genl_ops nl80211_ops[] = {
1089 { 1572 {
1090 .cmd = NL80211_CMD_GET_STATION, 1573 .cmd = NL80211_CMD_GET_STATION,
1091 .doit = nl80211_get_station, 1574 .doit = nl80211_get_station,
1092 /* TODO: implement dumpit */ 1575 .dumpit = nl80211_dump_station,
1093 .policy = nl80211_policy, 1576 .policy = nl80211_policy,
1094 .flags = GENL_ADMIN_PERM, 1577 .flags = GENL_ADMIN_PERM,
1095 }, 1578 },
@@ -1111,6 +1594,31 @@ static struct genl_ops nl80211_ops[] = {
1111 .policy = nl80211_policy, 1594 .policy = nl80211_policy,
1112 .flags = GENL_ADMIN_PERM, 1595 .flags = GENL_ADMIN_PERM,
1113 }, 1596 },
1597 {
1598 .cmd = NL80211_CMD_GET_MPATH,
1599 .doit = nl80211_get_mpath,
1600 .dumpit = nl80211_dump_mpath,
1601 .policy = nl80211_policy,
1602 .flags = GENL_ADMIN_PERM,
1603 },
1604 {
1605 .cmd = NL80211_CMD_SET_MPATH,
1606 .doit = nl80211_set_mpath,
1607 .policy = nl80211_policy,
1608 .flags = GENL_ADMIN_PERM,
1609 },
1610 {
1611 .cmd = NL80211_CMD_NEW_MPATH,
1612 .doit = nl80211_new_mpath,
1613 .policy = nl80211_policy,
1614 .flags = GENL_ADMIN_PERM,
1615 },
1616 {
1617 .cmd = NL80211_CMD_DEL_MPATH,
1618 .doit = nl80211_del_mpath,
1619 .policy = nl80211_policy,
1620 .flags = GENL_ADMIN_PERM,
1621 },
1114}; 1622};
1115 1623
1116/* multicast groups */ 1624/* multicast groups */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
new file mode 100644
index 000000000000..185488da2466
--- /dev/null
+++ b/net/wireless/reg.c
@@ -0,0 +1,162 @@
1/*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11/*
12 * This regulatory domain control implementation is highly incomplete, it
13 * only exists for the purpose of not regressing mac80211.
14 *
15 * For now, drivers can restrict the set of allowed channels by either
16 * not registering those channels or setting the IEEE80211_CHAN_DISABLED
17 * flag; that flag will only be *set* by this code, never *cleared.
18 *
19 * The usual implementation is for a driver to read a device EEPROM to
20 * determine which regulatory domain it should be operating under, then
21 * looking up the allowable channels in a driver-local table and finally
22 * registering those channels in the wiphy structure.
23 *
24 * Alternatively, drivers that trust the regulatory domain control here
25 * will register a complete set of capabilities and the control code
26 * will restrict the set by setting the IEEE80211_CHAN_* flags.
27 */
28#include <linux/kernel.h>
29#include <net/wireless.h>
30#include "core.h"
31
32static char *ieee80211_regdom = "US";
33module_param(ieee80211_regdom, charp, 0444);
34MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
35
36struct ieee80211_channel_range {
37 short start_freq;
38 short end_freq;
39 int max_power;
40 int max_antenna_gain;
41 u32 flags;
42};
43
44struct ieee80211_regdomain {
45 const char *code;
46 const struct ieee80211_channel_range *ranges;
47 int n_ranges;
48};
49
50#define RANGE_PWR(_start, _end, _pwr, _ag, _flags) \
51 { _start, _end, _pwr, _ag, _flags }
52
53
54/*
55 * Ideally, in the future, these definitions will be loaded from a
56 * userspace table via some daemon.
57 */
58static const struct ieee80211_channel_range ieee80211_US_channels[] = {
59 /* IEEE 802.11b/g, channels 1..11 */
60 RANGE_PWR(2412, 2462, 27, 6, 0),
61 /* IEEE 802.11a, channel 36*/
62 RANGE_PWR(5180, 5180, 23, 6, 0),
63 /* IEEE 802.11a, channel 40*/
64 RANGE_PWR(5200, 5200, 23, 6, 0),
65 /* IEEE 802.11a, channel 44*/
66 RANGE_PWR(5220, 5220, 23, 6, 0),
67 /* IEEE 802.11a, channels 48..64 */
68 RANGE_PWR(5240, 5320, 23, 6, 0),
69 /* IEEE 802.11a, channels 149..165, outdoor */
70 RANGE_PWR(5745, 5825, 30, 6, 0),
71};
72
73static const struct ieee80211_channel_range ieee80211_JP_channels[] = {
74 /* IEEE 802.11b/g, channels 1..14 */
75 RANGE_PWR(2412, 2484, 20, 6, 0),
76 /* IEEE 802.11a, channels 34..48 */
77 RANGE_PWR(5170, 5240, 20, 6, IEEE80211_CHAN_PASSIVE_SCAN),
78 /* IEEE 802.11a, channels 52..64 */
79 RANGE_PWR(5260, 5320, 20, 6, IEEE80211_CHAN_NO_IBSS |
80 IEEE80211_CHAN_RADAR),
81};
82
83#define REGDOM(_code) \
84 { \
85 .code = __stringify(_code), \
86 .ranges = ieee80211_ ##_code## _channels, \
87 .n_ranges = ARRAY_SIZE(ieee80211_ ##_code## _channels), \
88 }
89
90static const struct ieee80211_regdomain ieee80211_regdoms[] = {
91 REGDOM(US),
92 REGDOM(JP),
93};
94
95
96static const struct ieee80211_regdomain *get_regdom(void)
97{
98 static const struct ieee80211_channel_range
99 ieee80211_world_channels[] = {
100 /* IEEE 802.11b/g, channels 1..11 */
101 RANGE_PWR(2412, 2462, 27, 6, 0),
102 };
103 static const struct ieee80211_regdomain regdom_world = REGDOM(world);
104 int i;
105
106 for (i = 0; i < ARRAY_SIZE(ieee80211_regdoms); i++)
107 if (strcmp(ieee80211_regdom, ieee80211_regdoms[i].code) == 0)
108 return &ieee80211_regdoms[i];
109
110 return &regdom_world;
111}
112
113
114static void handle_channel(struct ieee80211_channel *chan,
115 const struct ieee80211_regdomain *rd)
116{
117 int i;
118 u32 flags = chan->orig_flags;
119 const struct ieee80211_channel_range *rg = NULL;
120
121 for (i = 0; i < rd->n_ranges; i++) {
122 if (rd->ranges[i].start_freq <= chan->center_freq &&
123 chan->center_freq <= rd->ranges[i].end_freq) {
124 rg = &rd->ranges[i];
125 break;
126 }
127 }
128
129 if (!rg) {
130 /* not found */
131 flags |= IEEE80211_CHAN_DISABLED;
132 chan->flags = flags;
133 return;
134 }
135
136 chan->flags = flags;
137 chan->max_antenna_gain = min(chan->orig_mag,
138 rg->max_antenna_gain);
139 if (chan->orig_mpwr)
140 chan->max_power = min(chan->orig_mpwr, rg->max_power);
141 else
142 chan->max_power = rg->max_power;
143}
144
145static void handle_band(struct ieee80211_supported_band *sband,
146 const struct ieee80211_regdomain *rd)
147{
148 int i;
149
150 for (i = 0; i < sband->n_channels; i++)
151 handle_channel(&sband->channels[i], rd);
152}
153
154void wiphy_update_regulatory(struct wiphy *wiphy)
155{
156 enum ieee80211_band band;
157 const struct ieee80211_regdomain *rd = get_regdom();
158
159 for (band = 0; band < IEEE80211_NUM_BANDS; band++)
160 if (wiphy->bands[band])
161 handle_band(wiphy->bands[band], rd);
162}
diff --git a/net/wireless/util.c b/net/wireless/util.c
new file mode 100644
index 000000000000..f54424693a38
--- /dev/null
+++ b/net/wireless/util.c
@@ -0,0 +1,121 @@
1/*
2 * Wireless utility functions
3 *
4 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
5 */
6#include <net/wireless.h>
7#include <asm/bitops.h>
8#include "core.h"
9
10int ieee80211_channel_to_frequency(int chan)
11{
12 if (chan < 14)
13 return 2407 + chan * 5;
14
15 if (chan == 14)
16 return 2484;
17
18 /* FIXME: 802.11j 17.3.8.3.2 */
19 return (chan + 1000) * 5;
20}
21EXPORT_SYMBOL(ieee80211_channel_to_frequency);
22
23int ieee80211_frequency_to_channel(int freq)
24{
25 if (freq == 2484)
26 return 14;
27
28 if (freq < 2484)
29 return (freq - 2407) / 5;
30
31 /* FIXME: 802.11j 17.3.8.3.2 */
32 return freq/5 - 1000;
33}
34EXPORT_SYMBOL(ieee80211_frequency_to_channel);
35
36struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
37 int freq)
38{
39 enum ieee80211_band band;
40 struct ieee80211_supported_band *sband;
41 int i;
42
43 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
44 sband = wiphy->bands[band];
45
46 if (!sband)
47 continue;
48
49 for (i = 0; i < sband->n_channels; i++) {
50 if (sband->channels[i].center_freq == freq)
51 return &sband->channels[i];
52 }
53 }
54
55 return NULL;
56}
57EXPORT_SYMBOL(__ieee80211_get_channel);
58
59static void set_mandatory_flags_band(struct ieee80211_supported_band *sband,
60 enum ieee80211_band band)
61{
62 int i, want;
63
64 switch (band) {
65 case IEEE80211_BAND_5GHZ:
66 want = 3;
67 for (i = 0; i < sband->n_bitrates; i++) {
68 if (sband->bitrates[i].bitrate == 60 ||
69 sband->bitrates[i].bitrate == 120 ||
70 sband->bitrates[i].bitrate == 240) {
71 sband->bitrates[i].flags |=
72 IEEE80211_RATE_MANDATORY_A;
73 want--;
74 }
75 }
76 WARN_ON(want);
77 break;
78 case IEEE80211_BAND_2GHZ:
79 want = 7;
80 for (i = 0; i < sband->n_bitrates; i++) {
81 if (sband->bitrates[i].bitrate == 10) {
82 sband->bitrates[i].flags |=
83 IEEE80211_RATE_MANDATORY_B |
84 IEEE80211_RATE_MANDATORY_G;
85 want--;
86 }
87
88 if (sband->bitrates[i].bitrate == 20 ||
89 sband->bitrates[i].bitrate == 55 ||
90 sband->bitrates[i].bitrate == 110 ||
91 sband->bitrates[i].bitrate == 60 ||
92 sband->bitrates[i].bitrate == 120 ||
93 sband->bitrates[i].bitrate == 240) {
94 sband->bitrates[i].flags |=
95 IEEE80211_RATE_MANDATORY_G;
96 want--;
97 }
98
99 if (sband->bitrates[i].bitrate != 10 &&
100 sband->bitrates[i].bitrate != 20 &&
101 sband->bitrates[i].bitrate != 55 &&
102 sband->bitrates[i].bitrate != 110)
103 sband->bitrates[i].flags |=
104 IEEE80211_RATE_ERP_G;
105 }
106 WARN_ON(want != 0 && want != 3 && want != 6);
107 break;
108 case IEEE80211_NUM_BANDS:
109 WARN_ON(1);
110 break;
111 }
112}
113
114void ieee80211_set_bitrate_flags(struct wiphy *wiphy)
115{
116 enum ieee80211_band band;
117
118 for (band = 0; band < IEEE80211_NUM_BANDS; band++)
119 if (wiphy->bands[band])
120 set_mandatory_flags_band(wiphy->bands[band], band);
121}
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index 2c569b63e7d8..947188a5b937 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -1157,7 +1157,7 @@ static void rtmsg_iwinfo(struct net_device *dev, char *event, int event_len)
1157 struct sk_buff *skb; 1157 struct sk_buff *skb;
1158 int err; 1158 int err;
1159 1159
1160 if (dev->nd_net != &init_net) 1160 if (dev_net(dev) != &init_net)
1161 return; 1161 return;
1162 1162
1163 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); 1163 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 339ca4a8e89e..6ba67c523c16 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -191,7 +191,7 @@ static int x25_device_event(struct notifier_block *this, unsigned long event,
191 struct net_device *dev = ptr; 191 struct net_device *dev = ptr;
192 struct x25_neigh *nb; 192 struct x25_neigh *nb;
193 193
194 if (dev->nd_net != &init_net) 194 if (dev_net(dev) != &init_net)
195 return NOTIFY_DONE; 195 return NOTIFY_DONE;
196 196
197 if (dev->type == ARPHRD_X25 197 if (dev->type == ARPHRD_X25
@@ -549,7 +549,7 @@ static struct sock *x25_make_new(struct sock *osk)
549 if (osk->sk_type != SOCK_SEQPACKET) 549 if (osk->sk_type != SOCK_SEQPACKET)
550 goto out; 550 goto out;
551 551
552 if ((sk = x25_alloc_socket(osk->sk_net)) == NULL) 552 if ((sk = x25_alloc_socket(sock_net(osk))) == NULL)
553 goto out; 553 goto out;
554 554
555 x25 = x25_sk(sk); 555 x25 = x25_sk(sk);
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index f0679d283110..3ff206c0ae94 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -95,7 +95,7 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
95 struct sk_buff *nskb; 95 struct sk_buff *nskb;
96 struct x25_neigh *nb; 96 struct x25_neigh *nb;
97 97
98 if (dev->nd_net != &init_net) 98 if (dev_net(dev) != &init_net)
99 goto drop; 99 goto drop;
100 100
101 nskb = skb_copy(skb, GFP_ATOMIC); 101 nskb = skb_copy(skb, GFP_ATOMIC);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 9fc4c315f6cd..ab4d0e598a2c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -46,6 +46,7 @@ EXPORT_SYMBOL(xfrm_cfg_mutex);
46 46
47static DEFINE_RWLOCK(xfrm_policy_lock); 47static DEFINE_RWLOCK(xfrm_policy_lock);
48 48
49static struct list_head xfrm_policy_bytype[XFRM_POLICY_TYPE_MAX];
49unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2]; 50unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
50EXPORT_SYMBOL(xfrm_policy_count); 51EXPORT_SYMBOL(xfrm_policy_count);
51 52
@@ -96,25 +97,52 @@ int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
96 return 0; 97 return 0;
97} 98}
98 99
100static inline struct dst_entry *__xfrm_dst_lookup(int tos,
101 xfrm_address_t *saddr,
102 xfrm_address_t *daddr,
103 int family)
104{
105 struct xfrm_policy_afinfo *afinfo;
106 struct dst_entry *dst;
107
108 afinfo = xfrm_policy_get_afinfo(family);
109 if (unlikely(afinfo == NULL))
110 return ERR_PTR(-EAFNOSUPPORT);
111
112 dst = afinfo->dst_lookup(tos, saddr, daddr);
113
114 xfrm_policy_put_afinfo(afinfo);
115
116 return dst;
117}
118
99static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos, 119static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
120 xfrm_address_t *prev_saddr,
121 xfrm_address_t *prev_daddr,
100 int family) 122 int family)
101{ 123{
102 xfrm_address_t *saddr = &x->props.saddr; 124 xfrm_address_t *saddr = &x->props.saddr;
103 xfrm_address_t *daddr = &x->id.daddr; 125 xfrm_address_t *daddr = &x->id.daddr;
104 struct xfrm_policy_afinfo *afinfo;
105 struct dst_entry *dst; 126 struct dst_entry *dst;
106 127
107 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) 128 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
108 saddr = x->coaddr; 129 saddr = x->coaddr;
109 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) 130 daddr = prev_daddr;
131 }
132 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
133 saddr = prev_saddr;
110 daddr = x->coaddr; 134 daddr = x->coaddr;
135 }
111 136
112 afinfo = xfrm_policy_get_afinfo(family); 137 dst = __xfrm_dst_lookup(tos, saddr, daddr, family);
113 if (unlikely(afinfo == NULL)) 138
114 return ERR_PTR(-EAFNOSUPPORT); 139 if (!IS_ERR(dst)) {
140 if (prev_saddr != saddr)
141 memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
142 if (prev_daddr != daddr)
143 memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
144 }
115 145
116 dst = afinfo->dst_lookup(tos, saddr, daddr);
117 xfrm_policy_put_afinfo(afinfo);
118 return dst; 146 return dst;
119} 147}
120 148
@@ -208,6 +236,7 @@ struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
208 policy = kzalloc(sizeof(struct xfrm_policy), gfp); 236 policy = kzalloc(sizeof(struct xfrm_policy), gfp);
209 237
210 if (policy) { 238 if (policy) {
239 INIT_LIST_HEAD(&policy->bytype);
211 INIT_HLIST_NODE(&policy->bydst); 240 INIT_HLIST_NODE(&policy->bydst);
212 INIT_HLIST_NODE(&policy->byidx); 241 INIT_HLIST_NODE(&policy->byidx);
213 rwlock_init(&policy->lock); 242 rwlock_init(&policy->lock);
@@ -230,7 +259,11 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
230 if (del_timer(&policy->timer)) 259 if (del_timer(&policy->timer))
231 BUG(); 260 BUG();
232 261
233 security_xfrm_policy_free(policy); 262 write_lock_bh(&xfrm_policy_lock);
263 list_del(&policy->bytype);
264 write_unlock_bh(&xfrm_policy_lock);
265
266 security_xfrm_policy_free(policy->security);
234 kfree(policy); 267 kfree(policy);
235} 268}
236EXPORT_SYMBOL(xfrm_policy_destroy); 269EXPORT_SYMBOL(xfrm_policy_destroy);
@@ -584,6 +617,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
584 policy->curlft.use_time = 0; 617 policy->curlft.use_time = 0;
585 if (!mod_timer(&policy->timer, jiffies + HZ)) 618 if (!mod_timer(&policy->timer, jiffies + HZ))
586 xfrm_pol_hold(policy); 619 xfrm_pol_hold(policy);
620 list_add_tail(&policy->bytype, &xfrm_policy_bytype[policy->type]);
587 write_unlock_bh(&xfrm_policy_lock); 621 write_unlock_bh(&xfrm_policy_lock);
588 622
589 if (delpol) 623 if (delpol)
@@ -642,7 +676,8 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
642 xfrm_sec_ctx_match(ctx, pol->security)) { 676 xfrm_sec_ctx_match(ctx, pol->security)) {
643 xfrm_pol_hold(pol); 677 xfrm_pol_hold(pol);
644 if (delete) { 678 if (delete) {
645 *err = security_xfrm_policy_delete(pol); 679 *err = security_xfrm_policy_delete(
680 pol->security);
646 if (*err) { 681 if (*err) {
647 write_unlock_bh(&xfrm_policy_lock); 682 write_unlock_bh(&xfrm_policy_lock);
648 return pol; 683 return pol;
@@ -684,7 +719,8 @@ struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete,
684 if (pol->type == type && pol->index == id) { 719 if (pol->type == type && pol->index == id) {
685 xfrm_pol_hold(pol); 720 xfrm_pol_hold(pol);
686 if (delete) { 721 if (delete) {
687 *err = security_xfrm_policy_delete(pol); 722 *err = security_xfrm_policy_delete(
723 pol->security);
688 if (*err) { 724 if (*err) {
689 write_unlock_bh(&xfrm_policy_lock); 725 write_unlock_bh(&xfrm_policy_lock);
690 return pol; 726 return pol;
@@ -722,7 +758,7 @@ xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
722 &xfrm_policy_inexact[dir], bydst) { 758 &xfrm_policy_inexact[dir], bydst) {
723 if (pol->type != type) 759 if (pol->type != type)
724 continue; 760 continue;
725 err = security_xfrm_policy_delete(pol); 761 err = security_xfrm_policy_delete(pol->security);
726 if (err) { 762 if (err) {
727 xfrm_audit_policy_delete(pol, 0, 763 xfrm_audit_policy_delete(pol, 0,
728 audit_info->loginuid, 764 audit_info->loginuid,
@@ -736,7 +772,8 @@ xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
736 bydst) { 772 bydst) {
737 if (pol->type != type) 773 if (pol->type != type)
738 continue; 774 continue;
739 err = security_xfrm_policy_delete(pol); 775 err = security_xfrm_policy_delete(
776 pol->security);
740 if (err) { 777 if (err) {
741 xfrm_audit_policy_delete(pol, 0, 778 xfrm_audit_policy_delete(pol, 0,
742 audit_info->loginuid, 779 audit_info->loginuid,
@@ -822,57 +859,60 @@ out:
822} 859}
823EXPORT_SYMBOL(xfrm_policy_flush); 860EXPORT_SYMBOL(xfrm_policy_flush);
824 861
825int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*), 862int xfrm_policy_walk(struct xfrm_policy_walk *walk,
863 int (*func)(struct xfrm_policy *, int, int, void*),
826 void *data) 864 void *data)
827{ 865{
828 struct xfrm_policy *pol, *last = NULL; 866 struct xfrm_policy *old, *pol, *last = NULL;
829 struct hlist_node *entry; 867 int error = 0;
830 int dir, last_dir = 0, count, error;
831 868
869 if (walk->type >= XFRM_POLICY_TYPE_MAX &&
870 walk->type != XFRM_POLICY_TYPE_ANY)
871 return -EINVAL;
872
873 if (walk->policy == NULL && walk->count != 0)
874 return 0;
875
876 old = pol = walk->policy;
877 walk->policy = NULL;
832 read_lock_bh(&xfrm_policy_lock); 878 read_lock_bh(&xfrm_policy_lock);
833 count = 0;
834 879
835 for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) { 880 for (; walk->cur_type < XFRM_POLICY_TYPE_MAX; walk->cur_type++) {
836 struct hlist_head *table = xfrm_policy_bydst[dir].table; 881 if (walk->type != walk->cur_type &&
837 int i; 882 walk->type != XFRM_POLICY_TYPE_ANY)
883 continue;
838 884
839 hlist_for_each_entry(pol, entry, 885 if (pol == NULL) {
840 &xfrm_policy_inexact[dir], bydst) { 886 pol = list_first_entry(&xfrm_policy_bytype[walk->cur_type],
841 if (pol->type != type) 887 struct xfrm_policy, bytype);
888 }
889 list_for_each_entry_from(pol, &xfrm_policy_bytype[walk->cur_type], bytype) {
890 if (pol->dead)
842 continue; 891 continue;
843 if (last) { 892 if (last) {
844 error = func(last, last_dir % XFRM_POLICY_MAX, 893 error = func(last, xfrm_policy_id2dir(last->index),
845 count, data); 894 walk->count, data);
846 if (error) 895 if (error) {
896 xfrm_pol_hold(last);
897 walk->policy = last;
847 goto out; 898 goto out;
848 }
849 last = pol;
850 last_dir = dir;
851 count++;
852 }
853 for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
854 hlist_for_each_entry(pol, entry, table + i, bydst) {
855 if (pol->type != type)
856 continue;
857 if (last) {
858 error = func(last, last_dir % XFRM_POLICY_MAX,
859 count, data);
860 if (error)
861 goto out;
862 } 899 }
863 last = pol;
864 last_dir = dir;
865 count++;
866 } 900 }
901 last = pol;
902 walk->count++;
867 } 903 }
904 pol = NULL;
868 } 905 }
869 if (count == 0) { 906 if (walk->count == 0) {
870 error = -ENOENT; 907 error = -ENOENT;
871 goto out; 908 goto out;
872 } 909 }
873 error = func(last, last_dir % XFRM_POLICY_MAX, 0, data); 910 if (last)
911 error = func(last, xfrm_policy_id2dir(last->index), 0, data);
874out: 912out:
875 read_unlock_bh(&xfrm_policy_lock); 913 read_unlock_bh(&xfrm_policy_lock);
914 if (old != NULL)
915 xfrm_pol_put(old);
876 return error; 916 return error;
877} 917}
878EXPORT_SYMBOL(xfrm_policy_walk); 918EXPORT_SYMBOL(xfrm_policy_walk);
@@ -894,7 +934,8 @@ static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
894 934
895 match = xfrm_selector_match(sel, fl, family); 935 match = xfrm_selector_match(sel, fl, family);
896 if (match) 936 if (match)
897 ret = security_xfrm_policy_lookup(pol, fl->secid, dir); 937 ret = security_xfrm_policy_lookup(pol->security, fl->secid,
938 dir);
898 939
899 return ret; 940 return ret;
900} 941}
@@ -1011,8 +1052,9 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struc
1011 int err = 0; 1052 int err = 0;
1012 1053
1013 if (match) { 1054 if (match) {
1014 err = security_xfrm_policy_lookup(pol, fl->secid, 1055 err = security_xfrm_policy_lookup(pol->security,
1015 policy_to_flow_dir(dir)); 1056 fl->secid,
1057 policy_to_flow_dir(dir));
1016 if (!err) 1058 if (!err)
1017 xfrm_pol_hold(pol); 1059 xfrm_pol_hold(pol);
1018 else if (err == -ESRCH) 1060 else if (err == -ESRCH)
@@ -1101,7 +1143,8 @@ static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
1101 1143
1102 if (newp) { 1144 if (newp) {
1103 newp->selector = old->selector; 1145 newp->selector = old->selector;
1104 if (security_xfrm_policy_clone(old, newp)) { 1146 if (security_xfrm_policy_clone(old->security,
1147 &newp->security)) {
1105 kfree(newp); 1148 kfree(newp);
1106 return NULL; /* ENOMEM */ 1149 return NULL; /* ENOMEM */
1107 } 1150 }
@@ -1344,6 +1387,9 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1344 int trailer_len = 0; 1387 int trailer_len = 0;
1345 int tos; 1388 int tos;
1346 int family = policy->selector.family; 1389 int family = policy->selector.family;
1390 xfrm_address_t saddr, daddr;
1391
1392 xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
1347 1393
1348 tos = xfrm_get_tos(fl, family); 1394 tos = xfrm_get_tos(fl, family);
1349 err = tos; 1395 err = tos;
@@ -1374,7 +1420,8 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1374 1420
1375 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { 1421 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1376 family = xfrm[i]->props.family; 1422 family = xfrm[i]->props.family;
1377 dst = xfrm_dst_lookup(xfrm[i], tos, family); 1423 dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
1424 family);
1378 err = PTR_ERR(dst); 1425 err = PTR_ERR(dst);
1379 if (IS_ERR(dst)) 1426 if (IS_ERR(dst))
1380 goto put_states; 1427 goto put_states;
@@ -2038,7 +2085,7 @@ static int stale_bundle(struct dst_entry *dst)
2038void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev) 2085void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2039{ 2086{
2040 while ((dst = dst->child) && dst->xfrm && dst->dev == dev) { 2087 while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
2041 dst->dev = dev->nd_net->loopback_dev; 2088 dst->dev = dev_net(dev)->loopback_dev;
2042 dev_hold(dst->dev); 2089 dev_hold(dst->dev);
2043 dev_put(dev); 2090 dev_put(dev);
2044 } 2091 }
@@ -2309,7 +2356,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
2309{ 2356{
2310 struct net_device *dev = ptr; 2357 struct net_device *dev = ptr;
2311 2358
2312 if (dev->nd_net != &init_net) 2359 if (dev_net(dev) != &init_net)
2313 return NOTIFY_DONE; 2360 return NOTIFY_DONE;
2314 2361
2315 switch (event) { 2362 switch (event) {
@@ -2365,6 +2412,9 @@ static void __init xfrm_policy_init(void)
2365 panic("XFRM: failed to allocate bydst hash\n"); 2412 panic("XFRM: failed to allocate bydst hash\n");
2366 } 2413 }
2367 2414
2415 for (dir = 0; dir < XFRM_POLICY_TYPE_MAX; dir++)
2416 INIT_LIST_HEAD(&xfrm_policy_bytype[dir]);
2417
2368 INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task); 2418 INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
2369 register_netdevice_notifier(&xfrm_dev_notifier); 2419 register_netdevice_notifier(&xfrm_dev_notifier);
2370} 2420}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 58f1f9347b54..5dcc10b93c86 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -50,6 +50,7 @@ static DEFINE_SPINLOCK(xfrm_state_lock);
50 * Main use is finding SA after policy selected tunnel or transport mode. 50 * Main use is finding SA after policy selected tunnel or transport mode.
51 * Also, it can be used by ah/esp icmp error handler to find offending SA. 51 * Also, it can be used by ah/esp icmp error handler to find offending SA.
52 */ 52 */
53static LIST_HEAD(xfrm_state_all);
53static struct hlist_head *xfrm_state_bydst __read_mostly; 54static struct hlist_head *xfrm_state_bydst __read_mostly;
54static struct hlist_head *xfrm_state_bysrc __read_mostly; 55static struct hlist_head *xfrm_state_bysrc __read_mostly;
55static struct hlist_head *xfrm_state_byspi __read_mostly; 56static struct hlist_head *xfrm_state_byspi __read_mostly;
@@ -512,6 +513,7 @@ struct xfrm_state *xfrm_state_alloc(void)
512 if (x) { 513 if (x) {
513 atomic_set(&x->refcnt, 1); 514 atomic_set(&x->refcnt, 1);
514 atomic_set(&x->tunnel_users, 0); 515 atomic_set(&x->tunnel_users, 0);
516 INIT_LIST_HEAD(&x->all);
515 INIT_HLIST_NODE(&x->bydst); 517 INIT_HLIST_NODE(&x->bydst);
516 INIT_HLIST_NODE(&x->bysrc); 518 INIT_HLIST_NODE(&x->bysrc);
517 INIT_HLIST_NODE(&x->byspi); 519 INIT_HLIST_NODE(&x->byspi);
@@ -537,6 +539,10 @@ void __xfrm_state_destroy(struct xfrm_state *x)
537{ 539{
538 BUG_TRAP(x->km.state == XFRM_STATE_DEAD); 540 BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
539 541
542 spin_lock_bh(&xfrm_state_lock);
543 list_del(&x->all);
544 spin_unlock_bh(&xfrm_state_lock);
545
540 spin_lock_bh(&xfrm_state_gc_lock); 546 spin_lock_bh(&xfrm_state_gc_lock);
541 hlist_add_head(&x->bydst, &xfrm_state_gc_list); 547 hlist_add_head(&x->bydst, &xfrm_state_gc_list);
542 spin_unlock_bh(&xfrm_state_gc_lock); 548 spin_unlock_bh(&xfrm_state_gc_lock);
@@ -913,6 +919,8 @@ static void __xfrm_state_insert(struct xfrm_state *x)
913 919
914 x->genid = ++xfrm_state_genid; 920 x->genid = ++xfrm_state_genid;
915 921
922 list_add_tail(&x->all, &xfrm_state_all);
923
916 h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr, 924 h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
917 x->props.reqid, x->props.family); 925 x->props.reqid, x->props.family);
918 hlist_add_head(&x->bydst, xfrm_state_bydst+h); 926 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
@@ -1522,36 +1530,47 @@ unlock:
1522} 1530}
1523EXPORT_SYMBOL(xfrm_alloc_spi); 1531EXPORT_SYMBOL(xfrm_alloc_spi);
1524 1532
1525int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), 1533int xfrm_state_walk(struct xfrm_state_walk *walk,
1534 int (*func)(struct xfrm_state *, int, void*),
1526 void *data) 1535 void *data)
1527{ 1536{
1528 int i; 1537 struct xfrm_state *old, *x, *last = NULL;
1529 struct xfrm_state *x, *last = NULL;
1530 struct hlist_node *entry;
1531 int count = 0;
1532 int err = 0; 1538 int err = 0;
1533 1539
1540 if (walk->state == NULL && walk->count != 0)
1541 return 0;
1542
1543 old = x = walk->state;
1544 walk->state = NULL;
1534 spin_lock_bh(&xfrm_state_lock); 1545 spin_lock_bh(&xfrm_state_lock);
1535 for (i = 0; i <= xfrm_state_hmask; i++) { 1546 if (x == NULL)
1536 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) { 1547 x = list_first_entry(&xfrm_state_all, struct xfrm_state, all);
1537 if (!xfrm_id_proto_match(x->id.proto, proto)) 1548 list_for_each_entry_from(x, &xfrm_state_all, all) {
1538 continue; 1549 if (x->km.state == XFRM_STATE_DEAD)
1539 if (last) { 1550 continue;
1540 err = func(last, count, data); 1551 if (!xfrm_id_proto_match(x->id.proto, walk->proto))
1541 if (err) 1552 continue;
1542 goto out; 1553 if (last) {
1554 err = func(last, walk->count, data);
1555 if (err) {
1556 xfrm_state_hold(last);
1557 walk->state = last;
1558 goto out;
1543 } 1559 }
1544 last = x;
1545 count++;
1546 } 1560 }
1561 last = x;
1562 walk->count++;
1547 } 1563 }
1548 if (count == 0) { 1564 if (walk->count == 0) {
1549 err = -ENOENT; 1565 err = -ENOENT;
1550 goto out; 1566 goto out;
1551 } 1567 }
1552 err = func(last, 0, data); 1568 if (last)
1569 err = func(last, 0, data);
1553out: 1570out:
1554 spin_unlock_bh(&xfrm_state_lock); 1571 spin_unlock_bh(&xfrm_state_lock);
1572 if (old != NULL)
1573 xfrm_state_put(old);
1555 return err; 1574 return err;
1556} 1575}
1557EXPORT_SYMBOL(xfrm_state_walk); 1576EXPORT_SYMBOL(xfrm_state_walk);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 019d21de19b3..1810f5645bb5 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -529,8 +529,6 @@ struct xfrm_dump_info {
529 struct sk_buff *out_skb; 529 struct sk_buff *out_skb;
530 u32 nlmsg_seq; 530 u32 nlmsg_seq;
531 u16 nlmsg_flags; 531 u16 nlmsg_flags;
532 int start_idx;
533 int this_idx;
534}; 532};
535 533
536static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb) 534static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
@@ -597,9 +595,6 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
597 struct nlmsghdr *nlh; 595 struct nlmsghdr *nlh;
598 int err; 596 int err;
599 597
600 if (sp->this_idx < sp->start_idx)
601 goto out;
602
603 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, 598 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
604 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags); 599 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
605 if (nlh == NULL) 600 if (nlh == NULL)
@@ -612,8 +607,6 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
612 goto nla_put_failure; 607 goto nla_put_failure;
613 608
614 nlmsg_end(skb, nlh); 609 nlmsg_end(skb, nlh);
615out:
616 sp->this_idx++;
617 return 0; 610 return 0;
618 611
619nla_put_failure: 612nla_put_failure:
@@ -621,18 +614,32 @@ nla_put_failure:
621 return err; 614 return err;
622} 615}
623 616
617static int xfrm_dump_sa_done(struct netlink_callback *cb)
618{
619 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
620 xfrm_state_walk_done(walk);
621 return 0;
622}
623
624static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) 624static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
625{ 625{
626 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
626 struct xfrm_dump_info info; 627 struct xfrm_dump_info info;
627 628
629 BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
630 sizeof(cb->args) - sizeof(cb->args[0]));
631
628 info.in_skb = cb->skb; 632 info.in_skb = cb->skb;
629 info.out_skb = skb; 633 info.out_skb = skb;
630 info.nlmsg_seq = cb->nlh->nlmsg_seq; 634 info.nlmsg_seq = cb->nlh->nlmsg_seq;
631 info.nlmsg_flags = NLM_F_MULTI; 635 info.nlmsg_flags = NLM_F_MULTI;
632 info.this_idx = 0; 636
633 info.start_idx = cb->args[0]; 637 if (!cb->args[0]) {
634 (void) xfrm_state_walk(0, dump_one_state, &info); 638 cb->args[0] = 1;
635 cb->args[0] = info.this_idx; 639 xfrm_state_walk_init(walk, 0);
640 }
641
642 (void) xfrm_state_walk(walk, dump_one_state, &info);
636 643
637 return skb->len; 644 return skb->len;
638} 645}
@@ -651,7 +658,6 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
651 info.out_skb = skb; 658 info.out_skb = skb;
652 info.nlmsg_seq = seq; 659 info.nlmsg_seq = seq;
653 info.nlmsg_flags = 0; 660 info.nlmsg_flags = 0;
654 info.this_idx = info.start_idx = 0;
655 661
656 if (dump_one_state(x, 0, &info)) { 662 if (dump_one_state(x, 0, &info)) {
657 kfree_skb(skb); 663 kfree_skb(skb);
@@ -953,7 +959,7 @@ static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs
953 return 0; 959 return 0;
954 960
955 uctx = nla_data(rt); 961 uctx = nla_data(rt);
956 return security_xfrm_policy_alloc(pol, uctx); 962 return security_xfrm_policy_alloc(&pol->security, uctx);
957} 963}
958 964
959static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut, 965static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
@@ -1137,7 +1143,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1137 NETLINK_CB(skb).sid); 1143 NETLINK_CB(skb).sid);
1138 1144
1139 if (err) { 1145 if (err) {
1140 security_xfrm_policy_free(xp); 1146 security_xfrm_policy_free(xp->security);
1141 kfree(xp); 1147 kfree(xp);
1142 return err; 1148 return err;
1143 } 1149 }
@@ -1229,9 +1235,6 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
1229 struct sk_buff *skb = sp->out_skb; 1235 struct sk_buff *skb = sp->out_skb;
1230 struct nlmsghdr *nlh; 1236 struct nlmsghdr *nlh;
1231 1237
1232 if (sp->this_idx < sp->start_idx)
1233 goto out;
1234
1235 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, 1238 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
1236 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags); 1239 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
1237 if (nlh == NULL) 1240 if (nlh == NULL)
@@ -1247,8 +1250,6 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
1247 goto nlmsg_failure; 1250 goto nlmsg_failure;
1248 1251
1249 nlmsg_end(skb, nlh); 1252 nlmsg_end(skb, nlh);
1250out:
1251 sp->this_idx++;
1252 return 0; 1253 return 0;
1253 1254
1254nlmsg_failure: 1255nlmsg_failure:
@@ -1256,21 +1257,33 @@ nlmsg_failure:
1256 return -EMSGSIZE; 1257 return -EMSGSIZE;
1257} 1258}
1258 1259
1260static int xfrm_dump_policy_done(struct netlink_callback *cb)
1261{
1262 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1263
1264 xfrm_policy_walk_done(walk);
1265 return 0;
1266}
1267
1259static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb) 1268static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1260{ 1269{
1270 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1261 struct xfrm_dump_info info; 1271 struct xfrm_dump_info info;
1262 1272
1273 BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
1274 sizeof(cb->args) - sizeof(cb->args[0]));
1275
1263 info.in_skb = cb->skb; 1276 info.in_skb = cb->skb;
1264 info.out_skb = skb; 1277 info.out_skb = skb;
1265 info.nlmsg_seq = cb->nlh->nlmsg_seq; 1278 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1266 info.nlmsg_flags = NLM_F_MULTI; 1279 info.nlmsg_flags = NLM_F_MULTI;
1267 info.this_idx = 0; 1280
1268 info.start_idx = cb->args[0]; 1281 if (!cb->args[0]) {
1269 (void) xfrm_policy_walk(XFRM_POLICY_TYPE_MAIN, dump_one_policy, &info); 1282 cb->args[0] = 1;
1270#ifdef CONFIG_XFRM_SUB_POLICY 1283 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
1271 (void) xfrm_policy_walk(XFRM_POLICY_TYPE_SUB, dump_one_policy, &info); 1284 }
1272#endif 1285
1273 cb->args[0] = info.this_idx; 1286 (void) xfrm_policy_walk(walk, dump_one_policy, &info);
1274 1287
1275 return skb->len; 1288 return skb->len;
1276} 1289}
@@ -1290,7 +1303,6 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
1290 info.out_skb = skb; 1303 info.out_skb = skb;
1291 info.nlmsg_seq = seq; 1304 info.nlmsg_seq = seq;
1292 info.nlmsg_flags = 0; 1305 info.nlmsg_flags = 0;
1293 info.this_idx = info.start_idx = 0;
1294 1306
1295 if (dump_one_policy(xp, dir, 0, &info) < 0) { 1307 if (dump_one_policy(xp, dir, 0, &info) < 0) {
1296 kfree_skb(skb); 1308 kfree_skb(skb);
@@ -1325,22 +1337,23 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1325 xp = xfrm_policy_byid(type, p->dir, p->index, delete, &err); 1337 xp = xfrm_policy_byid(type, p->dir, p->index, delete, &err);
1326 else { 1338 else {
1327 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; 1339 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1328 struct xfrm_policy tmp; 1340 struct xfrm_sec_ctx *ctx;
1329 1341
1330 err = verify_sec_ctx_len(attrs); 1342 err = verify_sec_ctx_len(attrs);
1331 if (err) 1343 if (err)
1332 return err; 1344 return err;
1333 1345
1334 memset(&tmp, 0, sizeof(struct xfrm_policy)); 1346 ctx = NULL;
1335 if (rt) { 1347 if (rt) {
1336 struct xfrm_user_sec_ctx *uctx = nla_data(rt); 1348 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1337 1349
1338 if ((err = security_xfrm_policy_alloc(&tmp, uctx))) 1350 err = security_xfrm_policy_alloc(&ctx, uctx);
1351 if (err)
1339 return err; 1352 return err;
1340 } 1353 }
1341 xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security, 1354 xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, ctx,
1342 delete, &err); 1355 delete, &err);
1343 security_xfrm_policy_free(&tmp); 1356 security_xfrm_policy_free(ctx);
1344 } 1357 }
1345 if (xp == NULL) 1358 if (xp == NULL)
1346 return -ENOENT; 1359 return -ENOENT;
@@ -1560,26 +1573,26 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1560 xp = xfrm_policy_byid(type, p->dir, p->index, 0, &err); 1573 xp = xfrm_policy_byid(type, p->dir, p->index, 0, &err);
1561 else { 1574 else {
1562 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; 1575 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1563 struct xfrm_policy tmp; 1576 struct xfrm_sec_ctx *ctx;
1564 1577
1565 err = verify_sec_ctx_len(attrs); 1578 err = verify_sec_ctx_len(attrs);
1566 if (err) 1579 if (err)
1567 return err; 1580 return err;
1568 1581
1569 memset(&tmp, 0, sizeof(struct xfrm_policy)); 1582 ctx = NULL;
1570 if (rt) { 1583 if (rt) {
1571 struct xfrm_user_sec_ctx *uctx = nla_data(rt); 1584 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1572 1585
1573 if ((err = security_xfrm_policy_alloc(&tmp, uctx))) 1586 err = security_xfrm_policy_alloc(&ctx, uctx);
1587 if (err)
1574 return err; 1588 return err;
1575 } 1589 }
1576 xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security, 1590 xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, ctx, 0, &err);
1577 0, &err); 1591 security_xfrm_policy_free(ctx);
1578 security_xfrm_policy_free(&tmp);
1579 } 1592 }
1580
1581 if (xp == NULL) 1593 if (xp == NULL)
1582 return -ENOENT; 1594 return -ENOENT;
1595
1583 read_lock(&xp->lock); 1596 read_lock(&xp->lock);
1584 if (xp->dead) { 1597 if (xp->dead) {
1585 read_unlock(&xp->lock); 1598 read_unlock(&xp->lock);
@@ -1888,15 +1901,18 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
1888static struct xfrm_link { 1901static struct xfrm_link {
1889 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); 1902 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
1890 int (*dump)(struct sk_buff *, struct netlink_callback *); 1903 int (*dump)(struct sk_buff *, struct netlink_callback *);
1904 int (*done)(struct netlink_callback *);
1891} xfrm_dispatch[XFRM_NR_MSGTYPES] = { 1905} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
1892 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, 1906 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
1893 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa }, 1907 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
1894 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa, 1908 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
1895 .dump = xfrm_dump_sa }, 1909 .dump = xfrm_dump_sa,
1910 .done = xfrm_dump_sa_done },
1896 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, 1911 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
1897 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy }, 1912 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
1898 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy, 1913 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
1899 .dump = xfrm_dump_policy }, 1914 .dump = xfrm_dump_policy,
1915 .done = xfrm_dump_policy_done },
1900 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi }, 1916 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
1901 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire }, 1917 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
1902 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire }, 1918 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
@@ -1935,7 +1951,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1935 if (link->dump == NULL) 1951 if (link->dump == NULL)
1936 return -EINVAL; 1952 return -EINVAL;
1937 1953
1938 return netlink_dump_start(xfrm_nl, skb, nlh, link->dump, NULL); 1954 return netlink_dump_start(xfrm_nl, skb, nlh, link->dump, link->done);
1939 } 1955 }
1940 1956
1941 err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX, 1957 err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,