aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorPatrick McHardy <kaber@trash.net>2010-04-20 10:02:01 -0400
committerPatrick McHardy <kaber@trash.net>2010-04-20 10:02:01 -0400
commit62910554656cdcd6b6f84a5154c4155aae4ca231 (patch)
treedcf14004f6fd2ef7154362ff948bfeba0f3ea92d /net
parent22265a5c3c103cf8c50be62e6c90d045eb649e6d (diff)
parentab9304717f7624c41927f442e6b6d418b2d8b3e4 (diff)
Merge branch 'master' of /repos/git/net-next-2.6
Conflicts: Documentation/feature-removal-schedule.txt net/ipv6/netfilter/ip6t_REJECT.c net/netfilter/xt_limit.c Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'net')
-rw-r--r--net/802/garp.c5
-rw-r--r--net/802/p8022.c1
-rw-r--r--net/802/p8023.c1
-rw-r--r--net/802/psnap.c1
-rw-r--r--net/802/stp.c1
-rw-r--r--net/802/tr.c1
-rw-r--r--net/8021q/vlan.c11
-rw-r--r--net/8021q/vlan_core.c4
-rw-r--r--net/8021q/vlan_dev.c86
-rw-r--r--net/9p/client.c24
-rw-r--r--net/9p/protocol.c1
-rw-r--r--net/9p/trans_fd.c1
-rw-r--r--net/9p/trans_rdma.c1
-rw-r--r--net/9p/trans_virtio.c1
-rw-r--r--net/9p/util.c1
-rw-r--r--net/Kconfig8
-rw-r--r--net/Makefile2
-rw-r--r--net/appletalk/aarp.c1
-rw-r--r--net/appletalk/ddp.c3
-rw-r--r--net/atm/addr.c1
-rw-r--r--net/atm/atm_sysfs.c1
-rw-r--r--net/atm/br2684.c1
-rw-r--r--net/atm/clip.c1
-rw-r--r--net/atm/common.c1
-rw-r--r--net/atm/lec.c1
-rw-r--r--net/atm/mpc.c1
-rw-r--r--net/atm/mpoa_caches.c1
-rw-r--r--net/atm/mpoa_proc.c1
-rw-r--r--net/atm/pppoatm.c1
-rw-r--r--net/atm/proc.c11
-rw-r--r--net/atm/raw.c1
-rw-r--r--net/atm/resources.c1
-rw-r--r--net/atm/signaling.c1
-rw-r--r--net/ax25/af_ax25.c1
-rw-r--r--net/ax25/ax25_dev.c1
-rw-r--r--net/ax25/ax25_ds_subr.c1
-rw-r--r--net/ax25/ax25_iface.c1
-rw-r--r--net/ax25/ax25_in.c1
-rw-r--r--net/ax25/ax25_ip.c1
-rw-r--r--net/ax25/ax25_out.c1
-rw-r--r--net/ax25/ax25_route.c1
-rw-r--r--net/ax25/ax25_subr.c1
-rw-r--r--net/ax25/ax25_uid.c1
-rw-r--r--net/ax25/sysctl_net_ax25.c1
-rw-r--r--net/bluetooth/af_bluetooth.c1
-rw-r--r--net/bluetooth/bnep/core.c1
-rw-r--r--net/bluetooth/bnep/netdev.c9
-rw-r--r--net/bluetooth/bnep/sock.c2
-rw-r--r--net/bluetooth/cmtp/sock.c2
-rw-r--r--net/bluetooth/hci_sysfs.c4
-rw-r--r--net/bluetooth/hidp/sock.c2
-rw-r--r--net/bluetooth/l2cap.c51
-rw-r--r--net/bluetooth/rfcomm/core.c42
-rw-r--r--net/bluetooth/rfcomm/sock.c41
-rw-r--r--net/bluetooth/sco.c41
-rw-r--r--net/bridge/br_fdb.c1
-rw-r--r--net/bridge/br_forward.c1
-rw-r--r--net/bridge/br_if.c1
-rw-r--r--net/bridge/br_input.c1
-rw-r--r--net/bridge/br_ioctl.c1
-rw-r--r--net/bridge/br_multicast.c4
-rw-r--r--net/bridge/br_netfilter.c1
-rw-r--r--net/bridge/br_netlink.c1
-rw-r--r--net/bridge/br_notify.c4
-rw-r--r--net/bridge/br_stp_bpdu.c1
-rw-r--r--net/bridge/netfilter/ebt_ulog.c1
-rw-r--r--net/bridge/netfilter/ebtables.c1
-rw-r--r--net/caif/Kconfig48
-rw-r--r--net/caif/Makefile26
-rw-r--r--net/caif/caif_config_util.c87
-rw-r--r--net/caif/caif_dev.c413
-rw-r--r--net/caif/caif_socket.c1391
-rw-r--r--net/caif/cfcnfg.c530
-rw-r--r--net/caif/cfctrl.c664
-rw-r--r--net/caif/cfdbgl.c40
-rw-r--r--net/caif/cfdgml.c108
-rw-r--r--net/caif/cffrml.c151
-rw-r--r--net/caif/cfmuxl.c246
-rw-r--r--net/caif/cfpkt_skbuff.c571
-rw-r--r--net/caif/cfrfml.c108
-rw-r--r--net/caif/cfserl.c192
-rw-r--r--net/caif/cfsrvl.c185
-rw-r--r--net/caif/cfutill.c115
-rw-r--r--net/caif/cfveil.c107
-rw-r--r--net/caif/cfvidl.c65
-rw-r--r--net/caif/chnl_net.c451
-rw-r--r--net/can/bcm.c4
-rw-r--r--net/can/raw.c3
-rw-r--r--net/compat.c1
-rw-r--r--net/core/Makefile2
-rw-r--r--net/core/datagram.c1
-rw-r--r--net/core/dev.c1077
-rw-r--r--net/core/dev_addr_lists.c741
-rw-r--r--net/core/dev_mcast.c232
-rw-r--r--net/core/drop_monitor.c1
-rw-r--r--net/core/dst.c46
-rw-r--r--net/core/ethtool.c149
-rw-r--r--net/core/fib_rules.c20
-rw-r--r--net/core/filter.c1
-rw-r--r--net/core/flow.c405
-rw-r--r--net/core/gen_estimator.c1
-rw-r--r--net/core/iovec.c1
-rw-r--r--net/core/link_watch.c1
-rw-r--r--net/core/neighbour.c1
-rw-r--r--net/core/net-sysfs.c104
-rw-r--r--net/core/net-traces.c1
-rw-r--r--net/core/netpoll.c8
-rw-r--r--net/core/pktgen.c58
-rw-r--r--net/core/rtnetlink.c79
-rw-r--r--net/core/scm.c1
-rw-r--r--net/core/sock.c8
-rw-r--r--net/core/sysctl_net_core.c69
-rw-r--r--net/dcb/dcbnl.c1
-rw-r--r--net/dccp/ccid.c2
-rw-r--r--net/dccp/ccids/ccid2.c1
-rw-r--r--net/dccp/ccids/ccid3.c2
-rw-r--r--net/dccp/dccp.h4
-rw-r--r--net/dccp/feat.c1
-rw-r--r--net/dccp/input.c3
-rw-r--r--net/dccp/ipv4.c3
-rw-r--r--net/dccp/ipv6.c8
-rw-r--r--net/dccp/minisocks.c1
-rw-r--r--net/dccp/output.c5
-rw-r--r--net/dccp/probe.c1
-rw-r--r--net/dccp/proto.c1
-rw-r--r--net/dccp/timer.c4
-rw-r--r--net/decnet/af_decnet.c6
-rw-r--r--net/decnet/dn_dev.c13
-rw-r--r--net/decnet/dn_fib.c1
-rw-r--r--net/decnet/dn_neigh.c1
-rw-r--r--net/decnet/dn_nsp_in.c1
-rw-r--r--net/decnet/dn_nsp_out.c1
-rw-r--r--net/decnet/dn_route.c1
-rw-r--r--net/decnet/dn_rules.c22
-rw-r--r--net/decnet/dn_table.c1
-rw-r--r--net/decnet/netfilter/dn_rtmsg.c1
-rw-r--r--net/dsa/dsa.c1
-rw-r--r--net/dsa/slave.c14
-rw-r--r--net/dsa/tag_dsa.c1
-rw-r--r--net/dsa/tag_edsa.c1
-rw-r--r--net/dsa/tag_trailer.c1
-rw-r--r--net/econet/af_econet.c1
-rw-r--r--net/ethernet/pe2.c1
-rw-r--r--net/ieee802154/af_ieee802154.c4
-rw-r--r--net/ieee802154/dgram.c1
-rw-r--r--net/ieee802154/netlink.c1
-rw-r--r--net/ieee802154/nl-mac.c1
-rw-r--r--net/ieee802154/nl-phy.c1
-rw-r--r--net/ieee802154/raw.c1
-rw-r--r--net/ieee802154/wpan-class.c1
-rw-r--r--net/ipv4/Kconfig14
-rw-r--r--net/ipv4/af_inet.c41
-rw-r--r--net/ipv4/ah4.c1
-rw-r--r--net/ipv4/arp.c1
-rw-r--r--net/ipv4/cipso_ipv4.c1
-rw-r--r--net/ipv4/devinet.c7
-rw-r--r--net/ipv4/fib_frontend.c1
-rw-r--r--net/ipv4/fib_hash.c1
-rw-r--r--net/ipv4/fib_rules.c22
-rw-r--r--net/ipv4/fib_semantics.c1
-rw-r--r--net/ipv4/fib_trie.c5
-rw-r--r--net/ipv4/icmp.c6
-rw-r--r--net/ipv4/igmp.c5
-rw-r--r--net/ipv4/inet_diag.c1
-rw-r--r--net/ipv4/inet_fragment.c1
-rw-r--r--net/ipv4/inet_timewait_sock.c1
-rw-r--r--net/ipv4/ip_forward.c1
-rw-r--r--net/ipv4/ip_fragment.c1
-rw-r--r--net/ipv4/ip_gre.c5
-rw-r--r--net/ipv4/ip_input.c1
-rw-r--r--net/ipv4/ip_options.c1
-rw-r--r--net/ipv4/ip_output.c5
-rw-r--r--net/ipv4/ip_sockglue.c5
-rw-r--r--net/ipv4/ipconfig.c3
-rw-r--r--net/ipv4/ipip.c1
-rw-r--r--net/ipv4/ipmr.c829
-rw-r--r--net/ipv4/netfilter.c1
-rw-r--r--net/ipv4/netfilter/arptable_filter.c1
-rw-r--r--net/ipv4/netfilter/ip_queue.c1
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c5
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c1
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c1
-rw-r--r--net/ipv4/netfilter/iptable_filter.c1
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c1
-rw-r--r--net/ipv4/netfilter/iptable_raw.c1
-rw-r--r--net/ipv4/netfilter/iptable_security.c1
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c1
-rw-r--r--net/ipv4/netfilter/nf_nat_helper.c1
-rw-r--r--net/ipv4/netfilter/nf_nat_rule.c1
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c1
-rw-r--r--net/ipv4/netfilter/nf_nat_standalone.c1
-rw-r--r--net/ipv4/proc.c1
-rw-r--r--net/ipv4/raw.c1
-rw-r--r--net/ipv4/route.c39
-rw-r--r--net/ipv4/sysctl_net_ipv4.c1
-rw-r--r--net/ipv4/tcp.c67
-rw-r--r--net/ipv4/tcp_cong.c1
-rw-r--r--net/ipv4/tcp_input.c10
-rw-r--r--net/ipv4/tcp_ipv4.c39
-rw-r--r--net/ipv4/tcp_minisocks.c2
-rw-r--r--net/ipv4/tcp_output.c6
-rw-r--r--net/ipv4/tcp_probe.c1
-rw-r--r--net/ipv4/tcp_timer.c5
-rw-r--r--net/ipv4/tunnel4.c1
-rw-r--r--net/ipv4/udp.c12
-rw-r--r--net/ipv4/xfrm4_input.c1
-rw-r--r--net/ipv4/xfrm4_mode_tunnel.c1
-rw-r--r--net/ipv4/xfrm4_policy.c22
-rw-r--r--net/ipv6/addrconf.c807
-rw-r--r--net/ipv6/addrlabel.c1
-rw-r--r--net/ipv6/af_inet6.c1
-rw-r--r--net/ipv6/ah6.c1
-rw-r--r--net/ipv6/anycast.c1
-rw-r--r--net/ipv6/datagram.c1
-rw-r--r--net/ipv6/exthdrs.c1
-rw-r--r--net/ipv6/fib6_rules.c3
-rw-r--r--net/ipv6/icmp.c3
-rw-r--r--net/ipv6/inet6_connection_sock.c5
-rw-r--r--net/ipv6/ip6_fib.c16
-rw-r--r--net/ipv6/ip6_flowlabel.c1
-rw-r--r--net/ipv6/ip6_input.c1
-rw-r--r--net/ipv6/ip6_output.c9
-rw-r--r--net/ipv6/ip6_tunnel.c1
-rw-r--r--net/ipv6/ip6mr.c15
-rw-r--r--net/ipv6/ipv6_sockglue.c26
-rw-r--r--net/ipv6/mcast.c5
-rw-r--r--net/ipv6/ndisc.c1
-rw-r--r--net/ipv6/netfilter/ip6_queue.c1
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c2
-rw-r--r--net/ipv6/netfilter/ip6t_hbh.c4
-rw-r--r--net/ipv6/netfilter/ip6table_filter.c1
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c1
-rw-r--r--net/ipv6/netfilter/ip6table_raw.c3
-rw-r--r--net/ipv6/netfilter/ip6table_security.c1
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c1
-rw-r--r--net/ipv6/proc.c1
-rw-r--r--net/ipv6/raw.c1
-rw-r--r--net/ipv6/reassembly.c1
-rw-r--r--net/ipv6/route.c16
-rw-r--r--net/ipv6/sit.c1
-rw-r--r--net/ipv6/sysctl_net_ipv6.c1
-rw-r--r--net/ipv6/tcp_ipv6.c42
-rw-r--r--net/ipv6/tunnel6.c1
-rw-r--r--net/ipv6/udp.c5
-rw-r--r--net/ipv6/xfrm6_mode_tunnel.c1
-rw-r--r--net/ipv6/xfrm6_policy.c31
-rw-r--r--net/ipv6/xfrm6_tunnel.c1
-rw-r--r--net/ipx/af_ipx.c1
-rw-r--r--net/ipx/ipx_route.c1
-rw-r--r--net/irda/af_irda.c1
-rw-r--r--net/irda/discovery.c1
-rw-r--r--net/irda/ircomm/ircomm_core.c1
-rw-r--r--net/irda/ircomm/ircomm_lmp.c1
-rw-r--r--net/irda/ircomm/ircomm_param.c3
-rw-r--r--net/irda/ircomm/ircomm_tty.c1
-rw-r--r--net/irda/irda_device.c1
-rw-r--r--net/irda/iriap.c1
-rw-r--r--net/irda/iriap_event.c2
-rw-r--r--net/irda/irias_object.c1
-rw-r--r--net/irda/irlan/irlan_client.c1
-rw-r--r--net/irda/irlan/irlan_common.c1
-rw-r--r--net/irda/irlan/irlan_provider.c1
-rw-r--r--net/irda/irlap_event.c1
-rw-r--r--net/irda/irlap_frame.c1
-rw-r--r--net/irda/irnet/irnet_irda.c1
-rw-r--r--net/irda/irnet/irnet_ppp.c1
-rw-r--r--net/irda/irnetlink.c1
-rw-r--r--net/irda/irqueue.c1
-rw-r--r--net/irda/irttp.c1
-rw-r--r--net/key/af_key.c9
-rw-r--r--net/l2tp/Kconfig107
-rw-r--r--net/l2tp/Makefile12
-rw-r--r--net/l2tp/l2tp_core.c1693
-rw-r--r--net/l2tp/l2tp_core.h304
-rw-r--r--net/l2tp/l2tp_debugfs.c341
-rw-r--r--net/l2tp/l2tp_eth.c361
-rw-r--r--net/l2tp/l2tp_ip.c679
-rw-r--r--net/l2tp/l2tp_netlink.c840
-rw-r--r--net/l2tp/l2tp_ppp.c1837
-rw-r--r--net/lapb/lapb_iface.c1
-rw-r--r--net/lapb/lapb_in.c1
-rw-r--r--net/lapb/lapb_out.c1
-rw-r--r--net/lapb/lapb_subr.c1
-rw-r--r--net/llc/af_llc.c1
-rw-r--r--net/llc/llc_c_ac.c1
-rw-r--r--net/llc/llc_conn.c1
-rw-r--r--net/llc/llc_core.c6
-rw-r--r--net/llc/llc_if.c1
-rw-r--r--net/llc/llc_input.c1
-rw-r--r--net/llc/llc_sap.c1
-rw-r--r--net/llc/llc_station.c1
-rw-r--r--net/mac80211/Kconfig17
-rw-r--r--net/mac80211/agg-rx.c73
-rw-r--r--net/mac80211/agg-tx.c15
-rw-r--r--net/mac80211/cfg.c35
-rw-r--r--net/mac80211/debugfs_key.c1
-rw-r--r--net/mac80211/debugfs_netdev.c13
-rw-r--r--net/mac80211/debugfs_sta.c14
-rw-r--r--net/mac80211/driver-ops.h8
-rw-r--r--net/mac80211/driver-trace.h275
-rw-r--r--net/mac80211/ht.c3
-rw-r--r--net/mac80211/ibss.c17
-rw-r--r--net/mac80211/ieee80211_i.h29
-rw-r--r--net/mac80211/iface.c125
-rw-r--r--net/mac80211/key.c1
-rw-r--r--net/mac80211/led.c1
-rw-r--r--net/mac80211/main.c11
-rw-r--r--net/mac80211/mesh.c8
-rw-r--r--net/mac80211/mesh.h2
-rw-r--r--net/mac80211/mesh_hwmp.c9
-rw-r--r--net/mac80211/mesh_pathtbl.c1
-rw-r--r--net/mac80211/mesh_plink.c3
-rw-r--r--net/mac80211/mlme.c182
-rw-r--r--net/mac80211/pm.c2
-rw-r--r--net/mac80211/rate.c1
-rw-r--r--net/mac80211/rc80211_minstrel.c3
-rw-r--r--net/mac80211/rc80211_minstrel.h11
-rw-r--r--net/mac80211/rc80211_minstrel_debugfs.c42
-rw-r--r--net/mac80211/rc80211_pid_algo.c1
-rw-r--r--net/mac80211/rc80211_pid_debugfs.c1
-rw-r--r--net/mac80211/rx.c102
-rw-r--r--net/mac80211/scan.c74
-rw-r--r--net/mac80211/sta_info.c95
-rw-r--r--net/mac80211/sta_info.h12
-rw-r--r--net/mac80211/status.c14
-rw-r--r--net/mac80211/tx.c15
-rw-r--r--net/mac80211/util.c29
-rw-r--r--net/mac80211/wep.c1
-rw-r--r--net/mac80211/work.c8
-rw-r--r--net/mac80211/wpa.c2
-rw-r--r--net/netfilter/core.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_app.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_dh.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_est.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_lblc.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_proto.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_sh.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_wrr.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c1
-rw-r--r--net/netfilter/nf_conntrack_acct.c1
-rw-r--r--net/netfilter/nf_conntrack_amanda.c1
-rw-r--r--net/netfilter/nf_conntrack_ecache.c1
-rw-r--r--net/netfilter/nf_conntrack_ftp.c1
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c1
-rw-r--r--net/netfilter/nf_conntrack_helper.c1
-rw-r--r--net/netfilter/nf_conntrack_irc.c1
-rw-r--r--net/netfilter/nf_conntrack_netlink.c5
-rw-r--r--net/netfilter/nf_conntrack_proto.c1
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c1
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c1
-rw-r--r--net/netfilter/nf_conntrack_sane.c1
-rw-r--r--net/netfilter/nf_conntrack_standalone.c1
-rw-r--r--net/netfilter/nf_queue.c1
-rw-r--r--net/netfilter/nfnetlink.c4
-rw-r--r--net/netfilter/nfnetlink_log.c1
-rw-r--r--net/netfilter/nfnetlink_queue.c1
-rw-r--r--net/netfilter/x_tables.c1
-rw-r--r--net/netfilter/xt_CT.c1
-rw-r--r--net/netfilter/xt_LED.c1
-rw-r--r--net/netfilter/xt_RATEEST.c1
-rw-r--r--net/netfilter/xt_TCPMSS.c1
-rw-r--r--net/netfilter/xt_connlimit.c1
-rw-r--r--net/netfilter/xt_dccp.c1
-rw-r--r--net/netfilter/xt_hashlimit.c4
-rw-r--r--net/netfilter/xt_limit.c2
-rw-r--r--net/netfilter/xt_quota.c1
-rw-r--r--net/netfilter/xt_recent.c3
-rw-r--r--net/netfilter/xt_statistic.c1
-rw-r--r--net/netfilter/xt_string.c1
-rw-r--r--net/netlabel/netlabel_cipso_v4.c1
-rw-r--r--net/netlabel/netlabel_domainhash.c29
-rw-r--r--net/netlabel/netlabel_kapi.c1
-rw-r--r--net/netlabel/netlabel_mgmt.c1
-rw-r--r--net/netlabel/netlabel_unlabeled.c67
-rw-r--r--net/netlabel/netlabel_user.c1
-rw-r--r--net/netlink/af_netlink.c22
-rw-r--r--net/netlink/genetlink.c7
-rw-r--r--net/netrom/af_netrom.c1
-rw-r--r--net/netrom/nr_dev.c1
-rw-r--r--net/netrom/nr_in.c1
-rw-r--r--net/netrom/nr_loopback.c1
-rw-r--r--net/netrom/nr_out.c1
-rw-r--r--net/netrom/nr_route.c1
-rw-r--r--net/netrom/nr_subr.c1
-rw-r--r--net/packet/af_packet.c70
-rw-r--r--net/phonet/af_phonet.c1
-rw-r--r--net/phonet/datagram.c1
-rw-r--r--net/phonet/pep.c1
-rw-r--r--net/phonet/pn_dev.c1
-rw-r--r--net/phonet/pn_netlink.c1
-rw-r--r--net/phonet/socket.c1
-rw-r--r--net/rds/af_rds.c3
-rw-r--r--net/rds/cong.c1
-rw-r--r--net/rds/connection.c1
-rw-r--r--net/rds/ib.c1
-rw-r--r--net/rds/ib_cm.c1
-rw-r--r--net/rds/ib_rdma.c1
-rw-r--r--net/rds/ib_recv.c1
-rw-r--r--net/rds/info.c1
-rw-r--r--net/rds/iw.c1
-rw-r--r--net/rds/iw_cm.c1
-rw-r--r--net/rds/iw_rdma.c1
-rw-r--r--net/rds/iw_recv.c1
-rw-r--r--net/rds/loop.c1
-rw-r--r--net/rds/message.c1
-rw-r--r--net/rds/page.c1
-rw-r--r--net/rds/rdma.c1
-rw-r--r--net/rds/recv.c1
-rw-r--r--net/rds/send.c1
-rw-r--r--net/rds/tcp.c1
-rw-r--r--net/rds/tcp_listen.c1
-rw-r--r--net/rds/tcp_recv.c1
-rw-r--r--net/rfkill/core.c54
-rw-r--r--net/rose/af_rose.c1
-rw-r--r--net/rose/rose_dev.c1
-rw-r--r--net/rose/rose_link.c1
-rw-r--r--net/rose/rose_loopback.c1
-rw-r--r--net/rose/rose_out.c1
-rw-r--r--net/rose/rose_route.c1
-rw-r--r--net/rose/rose_subr.c1
-rw-r--r--net/rxrpc/af_rxrpc.c1
-rw-r--r--net/rxrpc/ar-accept.c7
-rw-r--r--net/rxrpc/ar-ack.c1
-rw-r--r--net/rxrpc/ar-call.c1
-rw-r--r--net/rxrpc/ar-connection.c1
-rw-r--r--net/rxrpc/ar-input.c1
-rw-r--r--net/rxrpc/ar-key.c1
-rw-r--r--net/rxrpc/ar-local.c1
-rw-r--r--net/rxrpc/ar-output.c1
-rw-r--r--net/rxrpc/ar-peer.c1
-rw-r--r--net/rxrpc/ar-transport.c1
-rw-r--r--net/rxrpc/rxkad.c1
-rw-r--r--net/sched/Kconfig5
-rw-r--r--net/sched/act_api.c46
-rw-r--r--net/sched/act_ipt.c1
-rw-r--r--net/sched/act_mirred.c1
-rw-r--r--net/sched/act_pedit.c1
-rw-r--r--net/sched/act_police.c1
-rw-r--r--net/sched/act_simple.c1
-rw-r--r--net/sched/cls_api.c31
-rw-r--r--net/sched/cls_basic.c1
-rw-r--r--net/sched/cls_cgroup.c37
-rw-r--r--net/sched/cls_flow.c1
-rw-r--r--net/sched/cls_fw.c1
-rw-r--r--net/sched/cls_route.c1
-rw-r--r--net/sched/cls_tcindex.c1
-rw-r--r--net/sched/cls_u32.c5
-rw-r--r--net/sched/em_meta.c1
-rw-r--r--net/sched/em_nbyte.c1
-rw-r--r--net/sched/em_text.c1
-rw-r--r--net/sched/ematch.c1
-rw-r--r--net/sched/sch_api.c113
-rw-r--r--net/sched/sch_atm.c1
-rw-r--r--net/sched/sch_cbq.c1
-rw-r--r--net/sched/sch_drr.c1
-rw-r--r--net/sched/sch_dsmark.c1
-rw-r--r--net/sched/sch_fifo.c1
-rw-r--r--net/sched/sch_generic.c16
-rw-r--r--net/sched/sch_gred.c1
-rw-r--r--net/sched/sch_htb.c1
-rw-r--r--net/sched/sch_mq.c1
-rw-r--r--net/sched/sch_multiq.c1
-rw-r--r--net/sched/sch_netem.c1
-rw-r--r--net/sched/sch_prio.c1
-rw-r--r--net/sched/sch_sfq.c1
-rw-r--r--net/sched/sch_teql.c1
-rw-r--r--net/sctp/auth.c1
-rw-r--r--net/sctp/bind_addr.c1
-rw-r--r--net/sctp/chunk.c1
-rw-r--r--net/sctp/input.c1
-rw-r--r--net/sctp/inqueue.c1
-rw-r--r--net/sctp/ipv6.c28
-rw-r--r--net/sctp/output.c1
-rw-r--r--net/sctp/outqueue.c1
-rw-r--r--net/sctp/primitive.c1
-rw-r--r--net/sctp/protocol.c3
-rw-r--r--net/sctp/sm_make_chunk.c1
-rw-r--r--net/sctp/sm_sideeffect.c1
-rw-r--r--net/sctp/sm_statefuns.c1
-rw-r--r--net/sctp/socket.c2
-rw-r--r--net/sctp/ssnmap.c1
-rw-r--r--net/sctp/transport.c1
-rw-r--r--net/sctp/tsnmap.c1
-rw-r--r--net/sctp/ulpevent.c1
-rw-r--r--net/sctp/ulpqueue.c1
-rw-r--r--net/socket.c10
-rw-r--r--net/sunrpc/addr.c1
-rw-r--r--net/sunrpc/auth_generic.c1
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c5
-rw-r--r--net/sunrpc/auth_gss/gss_generic_token.c1
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c1
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seal.c1
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seqnum.c1
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_unseal.c1
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c1
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_seal.c1
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_token.c2
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c1
-rw-r--r--net/sunrpc/auth_unix.c1
-rw-r--r--net/sunrpc/backchannel_rqst.c1
-rw-r--r--net/sunrpc/bc_svc.c17
-rw-r--r--net/sunrpc/clnt.c1
-rw-r--r--net/sunrpc/rpc_pipe.c2
-rw-r--r--net/sunrpc/rpcb_clnt.c1
-rw-r--r--net/sunrpc/socklib.c1
-rw-r--r--net/sunrpc/stats.c1
-rw-r--r--net/sunrpc/svc.c1
-rw-r--r--net/sunrpc/svc_xprt.c1
-rw-r--r--net/sunrpc/svcauth_unix.c1
-rw-r--r--net/sunrpc/xdr.c1
-rw-r--r--net/sunrpc/xprt.c22
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma.c1
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c1
-rw-r--r--net/sunrpc/xprtrdma/transport.c1
-rw-r--r--net/sunrpc/xprtrdma/verbs.c1
-rw-r--r--net/sunrpc/xprtsock.c11
-rw-r--r--net/tipc/core.c2
-rw-r--r--net/tipc/core.h1
-rw-r--r--net/tipc/eth_media.c1
-rw-r--r--net/tipc/link.c8
-rw-r--r--net/tipc/net.c4
-rw-r--r--net/tipc/node.c2
-rw-r--r--net/tipc/socket.c2
-rw-r--r--net/tipc/subscr.c15
-rw-r--r--net/unix/garbage.c1
-rw-r--r--net/unix/sysctl_net_unix.c1
-rw-r--r--net/wimax/op-msg.c1
-rw-r--r--net/wimax/op-reset.c2
-rw-r--r--net/wimax/op-state-get.c2
-rw-r--r--net/wimax/stack.c1
-rw-r--r--net/wireless/core.c1
-rw-r--r--net/wireless/core.h15
-rw-r--r--net/wireless/debugfs.c1
-rw-r--r--net/wireless/ibss.c1
-rw-r--r--net/wireless/mlme.c53
-rw-r--r--net/wireless/nl80211.c154
-rw-r--r--net/wireless/nl80211.h6
-rw-r--r--net/wireless/reg.c19
-rw-r--r--net/wireless/scan.c1
-rw-r--r--net/wireless/sme.c16
-rw-r--r--net/wireless/util.c25
-rw-r--r--net/wireless/wext-compat.c1
-rw-r--r--net/wireless/wext-core.c135
-rw-r--r--net/wireless/wext-priv.c1
-rw-r--r--net/wireless/wext-sme.c1
-rw-r--r--net/x25/af_x25.c68
-rw-r--r--net/x25/x25_dev.c1
-rw-r--r--net/x25/x25_facilities.c27
-rw-r--r--net/x25/x25_forward.c1
-rw-r--r--net/x25/x25_in.c16
-rw-r--r--net/x25/x25_link.c1
-rw-r--r--net/x25/x25_out.c1
-rw-r--r--net/x25/x25_route.c1
-rw-r--r--net/x25/x25_subr.c1
-rw-r--r--net/xfrm/xfrm_ipcomp.c2
-rw-r--r--net/xfrm/xfrm_output.c1
-rw-r--r--net/xfrm/xfrm_policy.c847
-rw-r--r--net/xfrm/xfrm_state.c6
-rw-r--r--net/xfrm/xfrm_sysctl.c1
-rw-r--r--net/xfrm/xfrm_user.c10
565 files changed, 17553 insertions, 3414 deletions
diff --git a/net/802/garp.c b/net/802/garp.c
index 1dcb0660c49d..941f2a324d3a 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -14,6 +14,7 @@
14#include <linux/etherdevice.h> 14#include <linux/etherdevice.h>
15#include <linux/rtnetlink.h> 15#include <linux/rtnetlink.h>
16#include <linux/llc.h> 16#include <linux/llc.h>
17#include <linux/slab.h>
17#include <net/llc.h> 18#include <net/llc.h>
18#include <net/llc_pdu.h> 19#include <net/llc_pdu.h>
19#include <net/garp.h> 20#include <net/garp.h>
@@ -575,7 +576,7 @@ int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
575 if (!app) 576 if (!app)
576 goto err2; 577 goto err2;
577 578
578 err = dev_mc_add(dev, appl->proto.group_address, ETH_ALEN, 0); 579 err = dev_mc_add(dev, appl->proto.group_address);
579 if (err < 0) 580 if (err < 0)
580 goto err3; 581 goto err3;
581 582
@@ -615,7 +616,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
615 garp_pdu_queue(app); 616 garp_pdu_queue(app);
616 garp_queue_xmit(app); 617 garp_queue_xmit(app);
617 618
618 dev_mc_delete(dev, appl->proto.group_address, ETH_ALEN, 0); 619 dev_mc_del(dev, appl->proto.group_address);
619 kfree(app); 620 kfree(app);
620 garp_release_port(dev); 621 garp_release_port(dev);
621} 622}
diff --git a/net/802/p8022.c b/net/802/p8022.c
index 2530f35241cd..7f353c4f437a 100644
--- a/net/802/p8022.c
+++ b/net/802/p8022.c
@@ -18,6 +18,7 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/skbuff.h> 20#include <linux/skbuff.h>
21#include <linux/slab.h>
21#include <net/datalink.h> 22#include <net/datalink.h>
22#include <linux/mm.h> 23#include <linux/mm.h>
23#include <linux/in.h> 24#include <linux/in.h>
diff --git a/net/802/p8023.c b/net/802/p8023.c
index 6ab1835041a7..1256a40da43c 100644
--- a/net/802/p8023.c
+++ b/net/802/p8023.c
@@ -18,6 +18,7 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/skbuff.h> 20#include <linux/skbuff.h>
21#include <linux/slab.h>
21 22
22#include <net/datalink.h> 23#include <net/datalink.h>
23#include <net/p8022.h> 24#include <net/p8022.h>
diff --git a/net/802/psnap.c b/net/802/psnap.c
index 6fea0750662b..21cde8fd5795 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -14,6 +14,7 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16#include <linux/skbuff.h> 16#include <linux/skbuff.h>
17#include <linux/slab.h>
17#include <net/datalink.h> 18#include <net/datalink.h>
18#include <net/llc.h> 19#include <net/llc.h>
19#include <net/psnap.h> 20#include <net/psnap.h>
diff --git a/net/802/stp.c b/net/802/stp.c
index 0b7a24452d11..53c8f77f0ccd 100644
--- a/net/802/stp.c
+++ b/net/802/stp.c
@@ -11,6 +11,7 @@
11#include <linux/skbuff.h> 11#include <linux/skbuff.h>
12#include <linux/etherdevice.h> 12#include <linux/etherdevice.h>
13#include <linux/llc.h> 13#include <linux/llc.h>
14#include <linux/slab.h>
14#include <net/llc.h> 15#include <net/llc.h>
15#include <net/llc_pdu.h> 16#include <net/llc_pdu.h>
16#include <net/stp.h> 17#include <net/stp.h>
diff --git a/net/802/tr.c b/net/802/tr.c
index 44acce47fcdc..1c6e596074df 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -36,6 +36,7 @@
36#include <linux/seq_file.h> 36#include <linux/seq_file.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/sysctl.h> 38#include <linux/sysctl.h>
39#include <linux/slab.h>
39#include <net/arp.h> 40#include <net/arp.h>
40#include <net/net_namespace.h> 41#include <net/net_namespace.h>
41 42
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 453512266ea1..3c1c8c14e929 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -22,6 +22,7 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/netdevice.h> 23#include <linux/netdevice.h>
24#include <linux/skbuff.h> 24#include <linux/skbuff.h>
25#include <linux/slab.h>
25#include <linux/init.h> 26#include <linux/init.h>
26#include <linux/rculist.h> 27#include <linux/rculist.h>
27#include <net/p8022.h> 28#include <net/p8022.h>
@@ -356,13 +357,13 @@ static void vlan_sync_address(struct net_device *dev,
356 * the new address */ 357 * the new address */
357 if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && 358 if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
358 !compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) 359 !compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
359 dev_unicast_delete(dev, vlandev->dev_addr); 360 dev_uc_del(dev, vlandev->dev_addr);
360 361
361 /* vlan address was equal to the old address and is different from 362 /* vlan address was equal to the old address and is different from
362 * the new address */ 363 * the new address */
363 if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && 364 if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
364 compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) 365 compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
365 dev_unicast_add(dev, vlandev->dev_addr); 366 dev_uc_add(dev, vlandev->dev_addr);
366 367
367 memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN); 368 memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
368} 369}
@@ -378,6 +379,8 @@ static void vlan_transfer_features(struct net_device *dev,
378#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 379#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
379 vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid; 380 vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
380#endif 381#endif
382 vlandev->real_num_tx_queues = dev->real_num_tx_queues;
383 BUG_ON(vlandev->real_num_tx_queues > vlandev->num_tx_queues);
381 384
382 if (old_features != vlandev->features) 385 if (old_features != vlandev->features)
383 netdev_features_change(vlandev); 386 netdev_features_change(vlandev);
@@ -530,6 +533,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
530 } 533 }
531 unregister_netdevice_many(&list); 534 unregister_netdevice_many(&list);
532 break; 535 break;
536
537 case NETDEV_PRE_TYPE_CHANGE:
538 /* Forbid underlaying device to change its type. */
539 return NOTIFY_BAD;
533 } 540 }
534 541
535out: 542out:
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index c0316e0ca6e8..c584a0af77d3 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -11,7 +11,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
11 if (netpoll_rx(skb)) 11 if (netpoll_rx(skb))
12 return NET_RX_DROP; 12 return NET_RX_DROP;
13 13
14 if (skb_bond_should_drop(skb)) 14 if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
15 goto drop; 15 goto drop;
16 16
17 skb->skb_iif = skb->dev->ifindex; 17 skb->skb_iif = skb->dev->ifindex;
@@ -83,7 +83,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
83{ 83{
84 struct sk_buff *p; 84 struct sk_buff *p;
85 85
86 if (skb_bond_should_drop(skb)) 86 if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
87 goto drop; 87 goto drop;
88 88
89 skb->skb_iif = skb->dev->ifindex; 89 skb->skb_iif = skb->dev->ifindex;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 9e83272fc5b0..b5249c5fd4d3 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -21,6 +21,7 @@
21 */ 21 */
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/slab.h>
24#include <linux/skbuff.h> 25#include <linux/skbuff.h>
25#include <linux/netdevice.h> 26#include <linux/netdevice.h>
26#include <linux/etherdevice.h> 27#include <linux/etherdevice.h>
@@ -361,6 +362,14 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
361 return ret; 362 return ret;
362} 363}
363 364
365static u16 vlan_dev_select_queue(struct net_device *dev, struct sk_buff *skb)
366{
367 struct net_device *rdev = vlan_dev_info(dev)->real_dev;
368 const struct net_device_ops *ops = rdev->netdev_ops;
369
370 return ops->ndo_select_queue(rdev, skb);
371}
372
364static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) 373static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
365{ 374{
366 /* TODO: gotta make sure the underlying layer can handle it, 375 /* TODO: gotta make sure the underlying layer can handle it,
@@ -461,7 +470,7 @@ static int vlan_dev_open(struct net_device *dev)
461 return -ENETDOWN; 470 return -ENETDOWN;
462 471
463 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) { 472 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
464 err = dev_unicast_add(real_dev, dev->dev_addr); 473 err = dev_uc_add(real_dev, dev->dev_addr);
465 if (err < 0) 474 if (err < 0)
466 goto out; 475 goto out;
467 } 476 }
@@ -490,7 +499,7 @@ clear_allmulti:
490 dev_set_allmulti(real_dev, -1); 499 dev_set_allmulti(real_dev, -1);
491del_unicast: 500del_unicast:
492 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) 501 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
493 dev_unicast_delete(real_dev, dev->dev_addr); 502 dev_uc_del(real_dev, dev->dev_addr);
494out: 503out:
495 netif_carrier_off(dev); 504 netif_carrier_off(dev);
496 return err; 505 return err;
@@ -505,14 +514,14 @@ static int vlan_dev_stop(struct net_device *dev)
505 vlan_gvrp_request_leave(dev); 514 vlan_gvrp_request_leave(dev);
506 515
507 dev_mc_unsync(real_dev, dev); 516 dev_mc_unsync(real_dev, dev);
508 dev_unicast_unsync(real_dev, dev); 517 dev_uc_unsync(real_dev, dev);
509 if (dev->flags & IFF_ALLMULTI) 518 if (dev->flags & IFF_ALLMULTI)
510 dev_set_allmulti(real_dev, -1); 519 dev_set_allmulti(real_dev, -1);
511 if (dev->flags & IFF_PROMISC) 520 if (dev->flags & IFF_PROMISC)
512 dev_set_promiscuity(real_dev, -1); 521 dev_set_promiscuity(real_dev, -1);
513 522
514 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) 523 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
515 dev_unicast_delete(real_dev, dev->dev_addr); 524 dev_uc_del(real_dev, dev->dev_addr);
516 525
517 netif_carrier_off(dev); 526 netif_carrier_off(dev);
518 return 0; 527 return 0;
@@ -531,13 +540,13 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
531 goto out; 540 goto out;
532 541
533 if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) { 542 if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
534 err = dev_unicast_add(real_dev, addr->sa_data); 543 err = dev_uc_add(real_dev, addr->sa_data);
535 if (err < 0) 544 if (err < 0)
536 return err; 545 return err;
537 } 546 }
538 547
539 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) 548 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
540 dev_unicast_delete(real_dev, dev->dev_addr); 549 dev_uc_del(real_dev, dev->dev_addr);
541 550
542out: 551out:
543 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 552 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -654,7 +663,7 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
654static void vlan_dev_set_rx_mode(struct net_device *vlan_dev) 663static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
655{ 664{
656 dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev); 665 dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
657 dev_unicast_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev); 666 dev_uc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
658} 667}
659 668
660/* 669/*
@@ -688,7 +697,8 @@ static const struct header_ops vlan_header_ops = {
688 .parse = eth_header_parse, 697 .parse = eth_header_parse,
689}; 698};
690 699
691static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops; 700static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops,
701 vlan_netdev_ops_sq, vlan_netdev_accel_ops_sq;
692 702
693static int vlan_dev_init(struct net_device *dev) 703static int vlan_dev_init(struct net_device *dev)
694{ 704{
@@ -722,11 +732,17 @@ static int vlan_dev_init(struct net_device *dev)
722 if (real_dev->features & NETIF_F_HW_VLAN_TX) { 732 if (real_dev->features & NETIF_F_HW_VLAN_TX) {
723 dev->header_ops = real_dev->header_ops; 733 dev->header_ops = real_dev->header_ops;
724 dev->hard_header_len = real_dev->hard_header_len; 734 dev->hard_header_len = real_dev->hard_header_len;
725 dev->netdev_ops = &vlan_netdev_accel_ops; 735 if (real_dev->netdev_ops->ndo_select_queue)
736 dev->netdev_ops = &vlan_netdev_accel_ops_sq;
737 else
738 dev->netdev_ops = &vlan_netdev_accel_ops;
726 } else { 739 } else {
727 dev->header_ops = &vlan_header_ops; 740 dev->header_ops = &vlan_header_ops;
728 dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; 741 dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
729 dev->netdev_ops = &vlan_netdev_ops; 742 if (real_dev->netdev_ops->ndo_select_queue)
743 dev->netdev_ops = &vlan_netdev_ops_sq;
744 else
745 dev->netdev_ops = &vlan_netdev_ops;
730 } 746 }
731 747
732 if (is_vlan_dev(real_dev)) 748 if (is_vlan_dev(real_dev))
@@ -865,6 +881,56 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
865#endif 881#endif
866}; 882};
867 883
884static const struct net_device_ops vlan_netdev_ops_sq = {
885 .ndo_select_queue = vlan_dev_select_queue,
886 .ndo_change_mtu = vlan_dev_change_mtu,
887 .ndo_init = vlan_dev_init,
888 .ndo_uninit = vlan_dev_uninit,
889 .ndo_open = vlan_dev_open,
890 .ndo_stop = vlan_dev_stop,
891 .ndo_start_xmit = vlan_dev_hard_start_xmit,
892 .ndo_validate_addr = eth_validate_addr,
893 .ndo_set_mac_address = vlan_dev_set_mac_address,
894 .ndo_set_rx_mode = vlan_dev_set_rx_mode,
895 .ndo_set_multicast_list = vlan_dev_set_rx_mode,
896 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
897 .ndo_do_ioctl = vlan_dev_ioctl,
898 .ndo_neigh_setup = vlan_dev_neigh_setup,
899 .ndo_get_stats = vlan_dev_get_stats,
900#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
901 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
902 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
903 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
904 .ndo_fcoe_disable = vlan_dev_fcoe_disable,
905 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
906#endif
907};
908
909static const struct net_device_ops vlan_netdev_accel_ops_sq = {
910 .ndo_select_queue = vlan_dev_select_queue,
911 .ndo_change_mtu = vlan_dev_change_mtu,
912 .ndo_init = vlan_dev_init,
913 .ndo_uninit = vlan_dev_uninit,
914 .ndo_open = vlan_dev_open,
915 .ndo_stop = vlan_dev_stop,
916 .ndo_start_xmit = vlan_dev_hwaccel_hard_start_xmit,
917 .ndo_validate_addr = eth_validate_addr,
918 .ndo_set_mac_address = vlan_dev_set_mac_address,
919 .ndo_set_rx_mode = vlan_dev_set_rx_mode,
920 .ndo_set_multicast_list = vlan_dev_set_rx_mode,
921 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
922 .ndo_do_ioctl = vlan_dev_ioctl,
923 .ndo_neigh_setup = vlan_dev_neigh_setup,
924 .ndo_get_stats = vlan_dev_get_stats,
925#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
926 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
927 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
928 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
929 .ndo_fcoe_disable = vlan_dev_fcoe_disable,
930 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
931#endif
932};
933
868void vlan_setup(struct net_device *dev) 934void vlan_setup(struct net_device *dev)
869{ 935{
870 ether_setup(dev); 936 ether_setup(dev);
diff --git a/net/9p/client.c b/net/9p/client.c
index e3e5bf4469ce..0aa79faa9850 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -29,6 +29,7 @@
29#include <linux/poll.h> 29#include <linux/poll.h>
30#include <linux/idr.h> 30#include <linux/idr.h>
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/slab.h>
32#include <linux/sched.h> 33#include <linux/sched.h>
33#include <linux/uaccess.h> 34#include <linux/uaccess.h>
34#include <net/9p/9p.h> 35#include <net/9p/9p.h>
@@ -71,9 +72,10 @@ inline int p9_is_proto_dotu(struct p9_client *clnt)
71EXPORT_SYMBOL(p9_is_proto_dotu); 72EXPORT_SYMBOL(p9_is_proto_dotu);
72 73
73/* Interpret mount option for protocol version */ 74/* Interpret mount option for protocol version */
74static unsigned char get_protocol_version(const substring_t *name) 75static int get_protocol_version(const substring_t *name)
75{ 76{
76 unsigned char version = -EINVAL; 77 int version = -EINVAL;
78
77 if (!strncmp("9p2000", name->from, name->to-name->from)) { 79 if (!strncmp("9p2000", name->from, name->to-name->from)) {
78 version = p9_proto_legacy; 80 version = p9_proto_legacy;
79 P9_DPRINTK(P9_DEBUG_9P, "Protocol version: Legacy\n"); 81 P9_DPRINTK(P9_DEBUG_9P, "Protocol version: Legacy\n");
@@ -533,7 +535,12 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
533 535
534 P9_DPRINTK(P9_DEBUG_MUX, "client %p op %d\n", c, type); 536 P9_DPRINTK(P9_DEBUG_MUX, "client %p op %d\n", c, type);
535 537
536 if (c->status != Connected) 538 /* we allow for any status other than disconnected */
539 if (c->status == Disconnected)
540 return ERR_PTR(-EIO);
541
542 /* if status is begin_disconnected we allow only clunk request */
543 if ((c->status == BeginDisconnect) && (type != P9_TCLUNK))
537 return ERR_PTR(-EIO); 544 return ERR_PTR(-EIO);
538 545
539 if (signal_pending(current)) { 546 if (signal_pending(current)) {
@@ -799,8 +806,10 @@ void p9_client_destroy(struct p9_client *clnt)
799 806
800 v9fs_put_trans(clnt->trans_mod); 807 v9fs_put_trans(clnt->trans_mod);
801 808
802 list_for_each_entry_safe(fid, fidptr, &clnt->fidlist, flist) 809 list_for_each_entry_safe(fid, fidptr, &clnt->fidlist, flist) {
810 printk(KERN_INFO "Found fid %d not clunked\n", fid->fid);
803 p9_fid_destroy(fid); 811 p9_fid_destroy(fid);
812 }
804 813
805 if (clnt->fidpool) 814 if (clnt->fidpool)
806 p9_idpool_destroy(clnt->fidpool); 815 p9_idpool_destroy(clnt->fidpool);
@@ -818,6 +827,13 @@ void p9_client_disconnect(struct p9_client *clnt)
818} 827}
819EXPORT_SYMBOL(p9_client_disconnect); 828EXPORT_SYMBOL(p9_client_disconnect);
820 829
830void p9_client_begin_disconnect(struct p9_client *clnt)
831{
832 P9_DPRINTK(P9_DEBUG_9P, "clnt %p\n", clnt);
833 clnt->status = BeginDisconnect;
834}
835EXPORT_SYMBOL(p9_client_begin_disconnect);
836
821struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, 837struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
822 char *uname, u32 n_uname, char *aname) 838 char *uname, u32 n_uname, char *aname)
823{ 839{
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 94f5a8f65e9c..e7541d5b0118 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -28,6 +28,7 @@
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/errno.h> 29#include <linux/errno.h>
30#include <linux/uaccess.h> 30#include <linux/uaccess.h>
31#include <linux/slab.h>
31#include <linux/sched.h> 32#include <linux/sched.h>
32#include <linux/types.h> 33#include <linux/types.h>
33#include <net/9p/9p.h> 34#include <net/9p/9p.h>
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 31d0b05582a9..98ce9bcb0e15 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -38,6 +38,7 @@
38#include <linux/idr.h> 38#include <linux/idr.h>
39#include <linux/file.h> 39#include <linux/file.h>
40#include <linux/parser.h> 40#include <linux/parser.h>
41#include <linux/slab.h>
41#include <net/9p/9p.h> 42#include <net/9p/9p.h>
42#include <net/9p/client.h> 43#include <net/9p/client.h>
43#include <net/9p/transport.h> 44#include <net/9p/transport.h>
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 2c95a89c0f46..041101ab4aa5 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -40,6 +40,7 @@
40#include <linux/file.h> 40#include <linux/file.h>
41#include <linux/parser.h> 41#include <linux/parser.h>
42#include <linux/semaphore.h> 42#include <linux/semaphore.h>
43#include <linux/slab.h>
43#include <net/9p/9p.h> 44#include <net/9p/9p.h>
44#include <net/9p/client.h> 45#include <net/9p/client.h>
45#include <net/9p/transport.h> 46#include <net/9p/transport.h>
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index afde1a89fbb3..7eb78ecc1618 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -37,6 +37,7 @@
37#include <linux/inet.h> 37#include <linux/inet.h>
38#include <linux/idr.h> 38#include <linux/idr.h>
39#include <linux/file.h> 39#include <linux/file.h>
40#include <linux/slab.h>
40#include <net/9p/9p.h> 41#include <net/9p/9p.h>
41#include <linux/parser.h> 42#include <linux/parser.h>
42#include <net/9p/client.h> 43#include <net/9p/client.h>
diff --git a/net/9p/util.c b/net/9p/util.c
index dc4ec05ad93d..e048701a72d2 100644
--- a/net/9p/util.c
+++ b/net/9p/util.c
@@ -30,6 +30,7 @@
30#include <linux/sched.h> 30#include <linux/sched.h>
31#include <linux/parser.h> 31#include <linux/parser.h>
32#include <linux/idr.h> 32#include <linux/idr.h>
33#include <linux/slab.h>
33#include <net/9p/9p.h> 34#include <net/9p/9p.h>
34 35
35/** 36/**
diff --git a/net/Kconfig b/net/Kconfig
index 041c35edb763..0d68b40fc0e6 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -186,6 +186,7 @@ source "net/sctp/Kconfig"
186source "net/rds/Kconfig" 186source "net/rds/Kconfig"
187source "net/tipc/Kconfig" 187source "net/tipc/Kconfig"
188source "net/atm/Kconfig" 188source "net/atm/Kconfig"
189source "net/l2tp/Kconfig"
189source "net/802/Kconfig" 190source "net/802/Kconfig"
190source "net/bridge/Kconfig" 191source "net/bridge/Kconfig"
191source "net/dsa/Kconfig" 192source "net/dsa/Kconfig"
@@ -203,6 +204,11 @@ source "net/ieee802154/Kconfig"
203source "net/sched/Kconfig" 204source "net/sched/Kconfig"
204source "net/dcb/Kconfig" 205source "net/dcb/Kconfig"
205 206
207config RPS
208 boolean
209 depends on SMP && SYSFS
210 default y
211
206menu "Network testing" 212menu "Network testing"
207 213
208config NET_PKTGEN 214config NET_PKTGEN
@@ -275,5 +281,7 @@ source "net/wimax/Kconfig"
275 281
276source "net/rfkill/Kconfig" 282source "net/rfkill/Kconfig"
277source "net/9p/Kconfig" 283source "net/9p/Kconfig"
284source "net/caif/Kconfig"
285
278 286
279endif # if NET 287endif # if NET
diff --git a/net/Makefile b/net/Makefile
index 1542e7268a7b..cb7bdc1210cb 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_BT) += bluetooth/
40obj-$(CONFIG_SUNRPC) += sunrpc/ 40obj-$(CONFIG_SUNRPC) += sunrpc/
41obj-$(CONFIG_AF_RXRPC) += rxrpc/ 41obj-$(CONFIG_AF_RXRPC) += rxrpc/
42obj-$(CONFIG_ATM) += atm/ 42obj-$(CONFIG_ATM) += atm/
43obj-$(CONFIG_L2TP) += l2tp/
43obj-$(CONFIG_DECNET) += decnet/ 44obj-$(CONFIG_DECNET) += decnet/
44obj-$(CONFIG_ECONET) += econet/ 45obj-$(CONFIG_ECONET) += econet/
45obj-$(CONFIG_PHONET) += phonet/ 46obj-$(CONFIG_PHONET) += phonet/
@@ -56,6 +57,7 @@ obj-$(CONFIG_NETLABEL) += netlabel/
56obj-$(CONFIG_IUCV) += iucv/ 57obj-$(CONFIG_IUCV) += iucv/
57obj-$(CONFIG_RFKILL) += rfkill/ 58obj-$(CONFIG_RFKILL) += rfkill/
58obj-$(CONFIG_NET_9P) += 9p/ 59obj-$(CONFIG_NET_9P) += 9p/
60obj-$(CONFIG_CAIF) += caif/
59ifneq ($(CONFIG_DCB),) 61ifneq ($(CONFIG_DCB),)
60obj-y += dcb/ 62obj-y += dcb/
61endif 63endif
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index f2b3b56aa779..50dce7981321 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -30,6 +30,7 @@
30 */ 30 */
31 31
32#include <linux/if_arp.h> 32#include <linux/if_arp.h>
33#include <linux/slab.h>
33#include <net/sock.h> 34#include <net/sock.h>
34#include <net/datalink.h> 35#include <net/datalink.h>
35#include <net/psnap.h> 36#include <net/psnap.h>
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 9fc4da56fb1d..c410b93fda2e 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -57,6 +57,7 @@
57#include <linux/smp_lock.h> 57#include <linux/smp_lock.h>
58#include <linux/termios.h> /* For TIOCOUTQ/INQ */ 58#include <linux/termios.h> /* For TIOCOUTQ/INQ */
59#include <linux/compat.h> 59#include <linux/compat.h>
60#include <linux/slab.h>
60#include <net/datalink.h> 61#include <net/datalink.h>
61#include <net/psnap.h> 62#include <net/psnap.h>
62#include <net/sock.h> 63#include <net/sock.h>
@@ -781,7 +782,7 @@ static int atif_ioctl(int cmd, void __user *arg)
781 atrtr_create(&rtdef, dev); 782 atrtr_create(&rtdef, dev);
782 } 783 }
783 } 784 }
784 dev_mc_add(dev, aarp_mcast, 6, 1); 785 dev_mc_add_global(dev, aarp_mcast);
785 return 0; 786 return 0;
786 787
787 case SIOCGIFADDR: 788 case SIOCGIFADDR:
diff --git a/net/atm/addr.c b/net/atm/addr.c
index cf3ae8b47572..dcda35c66f15 100644
--- a/net/atm/addr.c
+++ b/net/atm/addr.c
@@ -4,6 +4,7 @@
4 4
5#include <linux/atm.h> 5#include <linux/atm.h>
6#include <linux/atmdev.h> 6#include <linux/atmdev.h>
7#include <linux/slab.h>
7#include <linux/uaccess.h> 8#include <linux/uaccess.h>
8 9
9#include "signaling.h" 10#include "signaling.h"
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index f693b78eb467..799c631f0fed 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -1,6 +1,7 @@
1/* ATM driver model support. */ 1/* ATM driver model support. */
2 2
3#include <linux/kernel.h> 3#include <linux/kernel.h>
4#include <linux/slab.h>
4#include <linux/init.h> 5#include <linux/init.h>
5#include <linux/kobject.h> 6#include <linux/kobject.h>
6#include <linux/atmdev.h> 7#include <linux/atmdev.h>
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 4d64d87e7578..d6c7ceaf13e9 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -18,6 +18,7 @@
18#include <linux/rtnetlink.h> 18#include <linux/rtnetlink.h>
19#include <linux/ip.h> 19#include <linux/ip.h>
20#include <linux/uaccess.h> 20#include <linux/uaccess.h>
21#include <linux/slab.h>
21#include <net/arp.h> 22#include <net/arp.h>
22#include <linux/atm.h> 23#include <linux/atm.h>
23#include <linux/atmdev.h> 24#include <linux/atmdev.h>
diff --git a/net/atm/clip.c b/net/atm/clip.c
index ebfa022008f7..313aba11316b 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -30,6 +30,7 @@
30#include <linux/seq_file.h> 30#include <linux/seq_file.h>
31#include <linux/rcupdate.h> 31#include <linux/rcupdate.h>
32#include <linux/jhash.h> 32#include <linux/jhash.h>
33#include <linux/slab.h>
33#include <net/route.h> /* for struct rtable and routing */ 34#include <net/route.h> /* for struct rtable and routing */
34#include <net/icmp.h> /* icmp_send */ 35#include <net/icmp.h> /* icmp_send */
35#include <linux/param.h> /* for HZ */ 36#include <linux/param.h> /* for HZ */
diff --git a/net/atm/common.c b/net/atm/common.c
index 74d095a081e3..97ed94aa0cbc 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -18,6 +18,7 @@
18#include <linux/skbuff.h> 18#include <linux/skbuff.h>
19#include <linux/bitops.h> 19#include <linux/bitops.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/slab.h>
21#include <net/sock.h> /* struct sock */ 22#include <net/sock.h> /* struct sock */
22#include <linux/uaccess.h> 23#include <linux/uaccess.h>
23#include <linux/poll.h> 24#include <linux/poll.h>
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5da5753157f9..feeaf5718472 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -6,6 +6,7 @@
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ 7#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
8 8
9#include <linux/slab.h>
9#include <linux/kernel.h> 10#include <linux/kernel.h>
10#include <linux/bitops.h> 11#include <linux/bitops.h>
11#include <linux/capability.h> 12#include <linux/capability.h>
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index a6521c8aa88b..436f2e177657 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -2,6 +2,7 @@
2 2
3#include <linux/kernel.h> 3#include <linux/kernel.h>
4#include <linux/string.h> 4#include <linux/string.h>
5#include <linux/slab.h>
5#include <linux/timer.h> 6#include <linux/timer.h>
6#include <linux/init.h> 7#include <linux/init.h>
7#include <linux/bitops.h> 8#include <linux/bitops.h>
diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
index 4c141810eb6d..e773d8336918 100644
--- a/net/atm/mpoa_caches.c
+++ b/net/atm/mpoa_caches.c
@@ -1,5 +1,6 @@
1#include <linux/types.h> 1#include <linux/types.h>
2#include <linux/atmmpc.h> 2#include <linux/atmmpc.h>
3#include <linux/slab.h>
3#include <linux/time.h> 4#include <linux/time.h>
4 5
5#include "mpoa_caches.h" 6#include "mpoa_caches.h"
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index b9bdb98427e4..53e500292271 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -12,6 +12,7 @@
12#include <linux/uaccess.h> 12#include <linux/uaccess.h>
13#include <linux/atmmpc.h> 13#include <linux/atmmpc.h>
14#include <linux/atm.h> 14#include <linux/atm.h>
15#include <linux/gfp.h>
15#include "mpc.h" 16#include "mpc.h"
16#include "mpoa_caches.h" 17#include "mpoa_caches.h"
17 18
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 400839273c67..e49bb6d948a1 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -38,6 +38,7 @@
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/slab.h>
41#include <linux/atm.h> 42#include <linux/atm.h>
42#include <linux/atmdev.h> 43#include <linux/atmdev.h>
43#include <linux/capability.h> 44#include <linux/capability.h>
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 7a96b2376bd7..6262aeae398e 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -22,6 +22,7 @@
22#include <linux/netdevice.h> 22#include <linux/netdevice.h>
23#include <linux/atmclip.h> 23#include <linux/atmclip.h>
24#include <linux/init.h> /* for __init */ 24#include <linux/init.h> /* for __init */
25#include <linux/slab.h>
25#include <net/net_namespace.h> 26#include <net/net_namespace.h>
26#include <net/atmclip.h> 27#include <net/atmclip.h>
27#include <linux/uaccess.h> 28#include <linux/uaccess.h>
@@ -406,7 +407,6 @@ EXPORT_SYMBOL(atm_proc_root);
406 407
407int atm_proc_dev_register(struct atm_dev *dev) 408int atm_proc_dev_register(struct atm_dev *dev)
408{ 409{
409 int digits, num;
410 int error; 410 int error;
411 411
412 /* No proc info */ 412 /* No proc info */
@@ -414,16 +414,9 @@ int atm_proc_dev_register(struct atm_dev *dev)
414 return 0; 414 return 0;
415 415
416 error = -ENOMEM; 416 error = -ENOMEM;
417 digits = 0; 417 dev->proc_name = kasprintf(GFP_KERNEL, "%s:%d", dev->type, dev->number);
418 for (num = dev->number; num; num /= 10)
419 digits++;
420 if (!digits)
421 digits++;
422
423 dev->proc_name = kmalloc(strlen(dev->type) + digits + 2, GFP_KERNEL);
424 if (!dev->proc_name) 418 if (!dev->proc_name)
425 goto err_out; 419 goto err_out;
426 sprintf(dev->proc_name, "%s:%d", dev->type, dev->number);
427 420
428 dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root, 421 dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root,
429 &proc_atm_dev_ops, dev); 422 &proc_atm_dev_ops, dev);
diff --git a/net/atm/raw.c b/net/atm/raw.c
index d0c4bd047dc4..b4f7b9ff3c74 100644
--- a/net/atm/raw.c
+++ b/net/atm/raw.c
@@ -10,6 +10,7 @@
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/skbuff.h> 11#include <linux/skbuff.h>
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/slab.h>
13 14
14#include "common.h" 15#include "common.h"
15#include "protocols.h" 16#include "protocols.h"
diff --git a/net/atm/resources.c b/net/atm/resources.c
index 90082904f20d..d29e58261511 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -19,6 +19,7 @@
19#include <linux/capability.h> 19#include <linux/capability.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/mutex.h> 21#include <linux/mutex.h>
22#include <linux/slab.h>
22 23
23#include <net/sock.h> /* for struct sock */ 24#include <net/sock.h> /* for struct sock */
24 25
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index ad1d28ae512b..6ba6e466ee54 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -14,6 +14,7 @@
14#include <linux/atmsvc.h> 14#include <linux/atmsvc.h>
15#include <linux/atmdev.h> 15#include <linux/atmdev.h>
16#include <linux/bitops.h> 16#include <linux/bitops.h>
17#include <linux/slab.h>
17 18
18#include "resources.h" 19#include "resources.h"
19#include "signaling.h" 20#include "signaling.h"
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index a5beedf43e2d..65c5801261f9 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -25,6 +25,7 @@
25#include <linux/string.h> 25#include <linux/string.h>
26#include <linux/sockios.h> 26#include <linux/sockios.h>
27#include <linux/net.h> 27#include <linux/net.h>
28#include <linux/slab.h>
28#include <net/ax25.h> 29#include <net/ax25.h>
29#include <linux/inet.h> 30#include <linux/inet.h>
30#include <linux/netdevice.h> 31#include <linux/netdevice.h>
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index a7a0e0c9698b..c1cb982f6e86 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -9,6 +9,7 @@
9#include <linux/errno.h> 9#include <linux/errno.h>
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/socket.h> 11#include <linux/socket.h>
12#include <linux/slab.h>
12#include <linux/in.h> 13#include <linux/in.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/timer.h> 15#include <linux/timer.h>
diff --git a/net/ax25/ax25_ds_subr.c b/net/ax25/ax25_ds_subr.c
index b5e59787be2f..85816e612dc0 100644
--- a/net/ax25/ax25_ds_subr.c
+++ b/net/ax25/ax25_ds_subr.c
@@ -17,6 +17,7 @@
17#include <linux/sockios.h> 17#include <linux/sockios.h>
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include <linux/net.h> 19#include <linux/net.h>
20#include <linux/gfp.h>
20#include <net/ax25.h> 21#include <net/ax25.h>
21#include <linux/inet.h> 22#include <linux/inet.h>
22#include <linux/netdevice.h> 23#include <linux/netdevice.h>
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index 71338f112108..5a0dda8df492 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -17,6 +17,7 @@
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/sockios.h> 18#include <linux/sockios.h>
19#include <linux/net.h> 19#include <linux/net.h>
20#include <linux/slab.h>
20#include <net/ax25.h> 21#include <net/ax25.h>
21#include <linux/inet.h> 22#include <linux/inet.h>
22#include <linux/netdevice.h> 23#include <linux/netdevice.h>
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index de56d3983de0..9bb776541203 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
@@ -18,6 +18,7 @@
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/sockios.h> 19#include <linux/sockios.h>
20#include <linux/net.h> 20#include <linux/net.h>
21#include <linux/slab.h>
21#include <net/ax25.h> 22#include <net/ax25.h>
22#include <linux/inet.h> 23#include <linux/inet.h>
23#include <linux/netdevice.h> 24#include <linux/netdevice.h>
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index f047a57aa95c..cf0c47a26530 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -16,6 +16,7 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/sockios.h> 17#include <linux/sockios.h>
18#include <linux/net.h> 18#include <linux/net.h>
19#include <linux/slab.h>
19#include <net/ax25.h> 20#include <net/ax25.h>
20#include <linux/inet.h> 21#include <linux/inet.h>
21#include <linux/netdevice.h> 22#include <linux/netdevice.h>
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
index 14912600ec57..37507d806f65 100644
--- a/net/ax25/ax25_out.c
+++ b/net/ax25/ax25_out.c
@@ -19,6 +19,7 @@
19#include <linux/sockios.h> 19#include <linux/sockios.h>
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <linux/net.h> 21#include <linux/net.h>
22#include <linux/slab.h>
22#include <net/ax25.h> 23#include <net/ax25.h>
23#include <linux/inet.h> 24#include <linux/inet.h>
24#include <linux/netdevice.h> 25#include <linux/netdevice.h>
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index c833ba4c45a5..7805945a5fd6 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -23,6 +23,7 @@
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/sockios.h> 24#include <linux/sockios.h>
25#include <linux/net.h> 25#include <linux/net.h>
26#include <linux/slab.h>
26#include <net/ax25.h> 27#include <net/ax25.h>
27#include <linux/inet.h> 28#include <linux/inet.h>
28#include <linux/netdevice.h> 29#include <linux/netdevice.h>
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 034aa10a5198..c6715ee4ab8f 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -18,6 +18,7 @@
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/sockios.h> 19#include <linux/sockios.h>
20#include <linux/net.h> 20#include <linux/net.h>
21#include <linux/slab.h>
21#include <net/ax25.h> 22#include <net/ax25.h>
22#include <linux/inet.h> 23#include <linux/inet.h>
23#include <linux/netdevice.h> 24#include <linux/netdevice.h>
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 9f13f6eefcba..d349be9578f5 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -18,6 +18,7 @@
18#include <linux/sockios.h> 18#include <linux/sockios.h>
19#include <linux/net.h> 19#include <linux/net.h>
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <linux/slab.h>
21#include <net/ax25.h> 22#include <net/ax25.h>
22#include <linux/inet.h> 23#include <linux/inet.h>
23#include <linux/netdevice.h> 24#include <linux/netdevice.h>
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index 5159be6b2625..ebe0ef3f1d83 100644
--- a/net/ax25/sysctl_net_ax25.c
+++ b/net/ax25/sysctl_net_ax25.c
@@ -7,6 +7,7 @@
7 * Copyright (C) 1996 Mike Shaver (shaver@zeroknowledge.com) 7 * Copyright (C) 1996 Mike Shaver (shaver@zeroknowledge.com)
8 */ 8 */
9#include <linux/mm.h> 9#include <linux/mm.h>
10#include <linux/slab.h>
10#include <linux/sysctl.h> 11#include <linux/sysctl.h>
11#include <linux/spinlock.h> 12#include <linux/spinlock.h>
12#include <net/ax25.h> 13#include <net/ax25.h>
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 087cc51f5927..404a8500fd03 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -31,7 +31,6 @@
31#include <linux/errno.h> 31#include <linux/errno.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/sched.h> 33#include <linux/sched.h>
34#include <linux/slab.h>
35#include <linux/skbuff.h> 34#include <linux/skbuff.h>
36#include <linux/init.h> 35#include <linux/init.h>
37#include <linux/poll.h> 36#include <linux/poll.h>
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index ef09c7b3a858..8062dad6d10d 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -35,6 +35,7 @@
35#include <linux/freezer.h> 35#include <linux/freezer.h>
36#include <linux/errno.h> 36#include <linux/errno.h>
37#include <linux/net.h> 37#include <linux/net.h>
38#include <linux/slab.h>
38#include <net/sock.h> 39#include <net/sock.h>
39 40
40#include <linux/socket.h> 41#include <linux/socket.h>
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 326ab453edb7..d48b33f4d4ba 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -26,6 +26,7 @@
26*/ 26*/
27 27
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/slab.h>
29 30
30#include <linux/socket.h> 31#include <linux/socket.h>
31#include <linux/netdevice.h> 32#include <linux/netdevice.h>
@@ -87,7 +88,7 @@ static void bnep_net_set_mc_list(struct net_device *dev)
87 memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); 88 memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN);
88 r->len = htons(ETH_ALEN * 2); 89 r->len = htons(ETH_ALEN * 2);
89 } else { 90 } else {
90 struct dev_mc_list *dmi; 91 struct netdev_hw_addr *ha;
91 int i, len = skb->len; 92 int i, len = skb->len;
92 93
93 if (dev->flags & IFF_BROADCAST) { 94 if (dev->flags & IFF_BROADCAST) {
@@ -98,11 +99,11 @@ static void bnep_net_set_mc_list(struct net_device *dev)
98 /* FIXME: We should group addresses here. */ 99 /* FIXME: We should group addresses here. */
99 100
100 i = 0; 101 i = 0;
101 netdev_for_each_mc_addr(dmi, dev) { 102 netdev_for_each_mc_addr(ha, dev) {
102 if (i == BNEP_MAX_MULTICAST_FILTERS) 103 if (i == BNEP_MAX_MULTICAST_FILTERS)
103 break; 104 break;
104 memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); 105 memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
105 memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); 106 memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
106 } 107 }
107 r->len = htons(skb->len - len); 108 r->len = htons(skb->len - len);
108 } 109 }
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 2ff6ac7b2ed4..2862f53b66b1 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -30,7 +30,6 @@
30#include <linux/capability.h> 30#include <linux/capability.h>
31#include <linux/errno.h> 31#include <linux/errno.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/slab.h>
34#include <linux/poll.h> 33#include <linux/poll.h>
35#include <linux/fcntl.h> 34#include <linux/fcntl.h>
36#include <linux/skbuff.h> 35#include <linux/skbuff.h>
@@ -39,6 +38,7 @@
39#include <linux/file.h> 38#include <linux/file.h>
40#include <linux/init.h> 39#include <linux/init.h>
41#include <linux/compat.h> 40#include <linux/compat.h>
41#include <linux/gfp.h>
42#include <net/sock.h> 42#include <net/sock.h>
43 43
44#include <asm/system.h> 44#include <asm/system.h>
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 978cc3a718ad..7ea1979a8e4f 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -26,7 +26,6 @@
26#include <linux/capability.h> 26#include <linux/capability.h>
27#include <linux/errno.h> 27#include <linux/errno.h>
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/poll.h> 29#include <linux/poll.h>
31#include <linux/fcntl.h> 30#include <linux/fcntl.h>
32#include <linux/skbuff.h> 31#include <linux/skbuff.h>
@@ -34,6 +33,7 @@
34#include <linux/ioctl.h> 33#include <linux/ioctl.h>
35#include <linux/file.h> 34#include <linux/file.h>
36#include <linux/compat.h> 35#include <linux/compat.h>
36#include <linux/gfp.h>
37#include <net/sock.h> 37#include <net/sock.h>
38 38
39#include <linux/isdn/capilli.h> 39#include <linux/isdn/capilli.h>
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index cafb55b0cea5..0e8e1a59856c 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -1,6 +1,7 @@
1/* Bluetooth HCI driver model support. */ 1/* Bluetooth HCI driver model support. */
2 2
3#include <linux/kernel.h> 3#include <linux/kernel.h>
4#include <linux/slab.h>
4#include <linux/init.h> 5#include <linux/init.h>
5#include <linux/debugfs.h> 6#include <linux/debugfs.h>
6#include <linux/seq_file.h> 7#include <linux/seq_file.h>
@@ -8,8 +9,7 @@
8#include <net/bluetooth/bluetooth.h> 9#include <net/bluetooth/bluetooth.h>
9#include <net/bluetooth/hci_core.h> 10#include <net/bluetooth/hci_core.h>
10 11
11struct class *bt_class = NULL; 12static struct class *bt_class;
12EXPORT_SYMBOL_GPL(bt_class);
13 13
14struct dentry *bt_debugfs = NULL; 14struct dentry *bt_debugfs = NULL;
15EXPORT_SYMBOL_GPL(bt_debugfs); 15EXPORT_SYMBOL_GPL(bt_debugfs);
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 9cfef68b9fec..250dfd46237d 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -26,7 +26,6 @@
26#include <linux/capability.h> 26#include <linux/capability.h>
27#include <linux/errno.h> 27#include <linux/errno.h>
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/poll.h> 29#include <linux/poll.h>
31#include <linux/fcntl.h> 30#include <linux/fcntl.h>
32#include <linux/skbuff.h> 31#include <linux/skbuff.h>
@@ -35,6 +34,7 @@
35#include <linux/file.h> 34#include <linux/file.h>
36#include <linux/init.h> 35#include <linux/init.h>
37#include <linux/compat.h> 36#include <linux/compat.h>
37#include <linux/gfp.h>
38#include <net/sock.h> 38#include <net/sock.h>
39 39
40#include "hidp.h" 40#include "hidp.h"
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 4db7ae2fe07d..99d68c34e4f1 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -40,6 +40,8 @@
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/list.h> 41#include <linux/list.h>
42#include <linux/device.h> 42#include <linux/device.h>
43#include <linux/debugfs.h>
44#include <linux/seq_file.h>
43#include <linux/uaccess.h> 45#include <linux/uaccess.h>
44#include <linux/crc16.h> 46#include <linux/crc16.h>
45#include <net/sock.h> 47#include <net/sock.h>
@@ -1000,7 +1002,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
1000 1002
1001 BT_DBG("sk %p", sk); 1003 BT_DBG("sk %p", sk);
1002 1004
1003 if (!addr || addr->sa_family != AF_BLUETOOTH) 1005 if (!addr || alen < sizeof(addr->sa_family) ||
1006 addr->sa_family != AF_BLUETOOTH)
1004 return -EINVAL; 1007 return -EINVAL;
1005 1008
1006 memset(&la, 0, sizeof(la)); 1009 memset(&la, 0, sizeof(la));
@@ -2830,6 +2833,11 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2830 int len = cmd->len - sizeof(*rsp); 2833 int len = cmd->len - sizeof(*rsp);
2831 char req[64]; 2834 char req[64];
2832 2835
2836 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2837 l2cap_send_disconn_req(conn, sk);
2838 goto done;
2839 }
2840
2833 /* throw out any old stored conf requests */ 2841 /* throw out any old stored conf requests */
2834 result = L2CAP_CONF_SUCCESS; 2842 result = L2CAP_CONF_SUCCESS;
2835 len = l2cap_parse_conf_rsp(sk, rsp->data, 2843 len = l2cap_parse_conf_rsp(sk, rsp->data,
@@ -3937,31 +3945,42 @@ drop:
3937 return 0; 3945 return 0;
3938} 3946}
3939 3947
3940static ssize_t l2cap_sysfs_show(struct class *dev, 3948static int l2cap_debugfs_show(struct seq_file *f, void *p)
3941 struct class_attribute *attr,
3942 char *buf)
3943{ 3949{
3944 struct sock *sk; 3950 struct sock *sk;
3945 struct hlist_node *node; 3951 struct hlist_node *node;
3946 char *str = buf;
3947 3952
3948 read_lock_bh(&l2cap_sk_list.lock); 3953 read_lock_bh(&l2cap_sk_list.lock);
3949 3954
3950 sk_for_each(sk, node, &l2cap_sk_list.head) { 3955 sk_for_each(sk, node, &l2cap_sk_list.head) {
3951 struct l2cap_pinfo *pi = l2cap_pi(sk); 3956 struct l2cap_pinfo *pi = l2cap_pi(sk);
3952 3957
3953 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", 3958 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3954 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 3959 batostr(&bt_sk(sk)->src),
3955 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid, 3960 batostr(&bt_sk(sk)->dst),
3956 pi->dcid, pi->imtu, pi->omtu, pi->sec_level); 3961 sk->sk_state, __le16_to_cpu(pi->psm),
3962 pi->scid, pi->dcid,
3963 pi->imtu, pi->omtu, pi->sec_level);
3957 } 3964 }
3958 3965
3959 read_unlock_bh(&l2cap_sk_list.lock); 3966 read_unlock_bh(&l2cap_sk_list.lock);
3960 3967
3961 return str - buf; 3968 return 0;
3969}
3970
3971static int l2cap_debugfs_open(struct inode *inode, struct file *file)
3972{
3973 return single_open(file, l2cap_debugfs_show, inode->i_private);
3962} 3974}
3963 3975
3964static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL); 3976static const struct file_operations l2cap_debugfs_fops = {
3977 .open = l2cap_debugfs_open,
3978 .read = seq_read,
3979 .llseek = seq_lseek,
3980 .release = single_release,
3981};
3982
3983static struct dentry *l2cap_debugfs;
3965 3984
3966static const struct proto_ops l2cap_sock_ops = { 3985static const struct proto_ops l2cap_sock_ops = {
3967 .family = PF_BLUETOOTH, 3986 .family = PF_BLUETOOTH,
@@ -4021,8 +4040,12 @@ static int __init l2cap_init(void)
4021 goto error; 4040 goto error;
4022 } 4041 }
4023 4042
4024 if (class_create_file(bt_class, &class_attr_l2cap) < 0) 4043 if (bt_debugfs) {
4025 BT_ERR("Failed to create L2CAP info file"); 4044 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4045 bt_debugfs, NULL, &l2cap_debugfs_fops);
4046 if (!l2cap_debugfs)
4047 BT_ERR("Failed to create L2CAP debug file");
4048 }
4026 4049
4027 BT_INFO("L2CAP ver %s", VERSION); 4050 BT_INFO("L2CAP ver %s", VERSION);
4028 BT_INFO("L2CAP socket layer initialized"); 4051 BT_INFO("L2CAP socket layer initialized");
@@ -4036,7 +4059,7 @@ error:
4036 4059
4037static void __exit l2cap_exit(void) 4060static void __exit l2cap_exit(void)
4038{ 4061{
4039 class_remove_file(bt_class, &class_attr_l2cap); 4062 debugfs_remove(l2cap_debugfs);
4040 4063
4041 if (bt_sock_unregister(BTPROTO_L2CAP) < 0) 4064 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4042 BT_ERR("L2CAP socket unregistration failed"); 4065 BT_ERR("L2CAP socket unregistration failed");
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index db8a68e1a5ba..7dca91bb8c57 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -33,9 +33,12 @@
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/wait.h> 34#include <linux/wait.h>
35#include <linux/device.h> 35#include <linux/device.h>
36#include <linux/debugfs.h>
37#include <linux/seq_file.h>
36#include <linux/net.h> 38#include <linux/net.h>
37#include <linux/mutex.h> 39#include <linux/mutex.h>
38#include <linux/kthread.h> 40#include <linux/kthread.h>
41#include <linux/slab.h>
39 42
40#include <net/sock.h> 43#include <net/sock.h>
41#include <asm/uaccess.h> 44#include <asm/uaccess.h>
@@ -2098,13 +2101,10 @@ static struct hci_cb rfcomm_cb = {
2098 .security_cfm = rfcomm_security_cfm 2101 .security_cfm = rfcomm_security_cfm
2099}; 2102};
2100 2103
2101static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, 2104static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x)
2102 struct class_attribute *attr,
2103 char *buf)
2104{ 2105{
2105 struct rfcomm_session *s; 2106 struct rfcomm_session *s;
2106 struct list_head *pp, *p; 2107 struct list_head *pp, *p;
2107 char *str = buf;
2108 2108
2109 rfcomm_lock(); 2109 rfcomm_lock();
2110 2110
@@ -2114,18 +2114,32 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev,
2114 struct sock *sk = s->sock->sk; 2114 struct sock *sk = s->sock->sk;
2115 struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list); 2115 struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
2116 2116
2117 str += sprintf(str, "%s %s %ld %d %d %d %d\n", 2117 seq_printf(f, "%s %s %ld %d %d %d %d\n",
2118 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 2118 batostr(&bt_sk(sk)->src),
2119 d->state, d->dlci, d->mtu, d->rx_credits, d->tx_credits); 2119 batostr(&bt_sk(sk)->dst),
2120 d->state, d->dlci, d->mtu,
2121 d->rx_credits, d->tx_credits);
2120 } 2122 }
2121 } 2123 }
2122 2124
2123 rfcomm_unlock(); 2125 rfcomm_unlock();
2124 2126
2125 return (str - buf); 2127 return 0;
2126} 2128}
2127 2129
2128static CLASS_ATTR(rfcomm_dlc, S_IRUGO, rfcomm_dlc_sysfs_show, NULL); 2130static int rfcomm_dlc_debugfs_open(struct inode *inode, struct file *file)
2131{
2132 return single_open(file, rfcomm_dlc_debugfs_show, inode->i_private);
2133}
2134
2135static const struct file_operations rfcomm_dlc_debugfs_fops = {
2136 .open = rfcomm_dlc_debugfs_open,
2137 .read = seq_read,
2138 .llseek = seq_lseek,
2139 .release = single_release,
2140};
2141
2142static struct dentry *rfcomm_dlc_debugfs;
2129 2143
2130/* ---- Initialization ---- */ 2144/* ---- Initialization ---- */
2131static int __init rfcomm_init(void) 2145static int __init rfcomm_init(void)
@@ -2142,8 +2156,12 @@ static int __init rfcomm_init(void)
2142 goto unregister; 2156 goto unregister;
2143 } 2157 }
2144 2158
2145 if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) 2159 if (bt_debugfs) {
2146 BT_ERR("Failed to create RFCOMM info file"); 2160 rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444,
2161 bt_debugfs, NULL, &rfcomm_dlc_debugfs_fops);
2162 if (!rfcomm_dlc_debugfs)
2163 BT_ERR("Failed to create RFCOMM debug file");
2164 }
2147 2165
2148 err = rfcomm_init_ttys(); 2166 err = rfcomm_init_ttys();
2149 if (err < 0) 2167 if (err < 0)
@@ -2171,7 +2189,7 @@ unregister:
2171 2189
2172static void __exit rfcomm_exit(void) 2190static void __exit rfcomm_exit(void)
2173{ 2191{
2174 class_remove_file(bt_class, &class_attr_rfcomm_dlc); 2192 debugfs_remove(rfcomm_dlc_debugfs);
2175 2193
2176 hci_unregister_cb(&rfcomm_cb); 2194 hci_unregister_cb(&rfcomm_cb);
2177 2195
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index ca87d6ac6a20..8ed3c37684fa 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -40,6 +40,8 @@
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/list.h> 41#include <linux/list.h>
42#include <linux/device.h> 42#include <linux/device.h>
43#include <linux/debugfs.h>
44#include <linux/seq_file.h>
43#include <net/sock.h> 45#include <net/sock.h>
44 46
45#include <asm/system.h> 47#include <asm/system.h>
@@ -395,7 +397,8 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
395 397
396 BT_DBG("sk %p", sk); 398 BT_DBG("sk %p", sk);
397 399
398 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_rc)) 400 if (alen < sizeof(struct sockaddr_rc) ||
401 addr->sa_family != AF_BLUETOOTH)
399 return -EINVAL; 402 return -EINVAL;
400 403
401 lock_sock(sk); 404 lock_sock(sk);
@@ -1061,28 +1064,38 @@ done:
1061 return result; 1064 return result;
1062} 1065}
1063 1066
1064static ssize_t rfcomm_sock_sysfs_show(struct class *dev, 1067static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
1065 struct class_attribute *attr,
1066 char *buf)
1067{ 1068{
1068 struct sock *sk; 1069 struct sock *sk;
1069 struct hlist_node *node; 1070 struct hlist_node *node;
1070 char *str = buf;
1071 1071
1072 read_lock_bh(&rfcomm_sk_list.lock); 1072 read_lock_bh(&rfcomm_sk_list.lock);
1073 1073
1074 sk_for_each(sk, node, &rfcomm_sk_list.head) { 1074 sk_for_each(sk, node, &rfcomm_sk_list.head) {
1075 str += sprintf(str, "%s %s %d %d\n", 1075 seq_printf(f, "%s %s %d %d\n",
1076 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 1076 batostr(&bt_sk(sk)->src),
1077 batostr(&bt_sk(sk)->dst),
1077 sk->sk_state, rfcomm_pi(sk)->channel); 1078 sk->sk_state, rfcomm_pi(sk)->channel);
1078 } 1079 }
1079 1080
1080 read_unlock_bh(&rfcomm_sk_list.lock); 1081 read_unlock_bh(&rfcomm_sk_list.lock);
1081 1082
1082 return (str - buf); 1083 return 0;
1083} 1084}
1084 1085
1085static CLASS_ATTR(rfcomm, S_IRUGO, rfcomm_sock_sysfs_show, NULL); 1086static int rfcomm_sock_debugfs_open(struct inode *inode, struct file *file)
1087{
1088 return single_open(file, rfcomm_sock_debugfs_show, inode->i_private);
1089}
1090
1091static const struct file_operations rfcomm_sock_debugfs_fops = {
1092 .open = rfcomm_sock_debugfs_open,
1093 .read = seq_read,
1094 .llseek = seq_lseek,
1095 .release = single_release,
1096};
1097
1098static struct dentry *rfcomm_sock_debugfs;
1086 1099
1087static const struct proto_ops rfcomm_sock_ops = { 1100static const struct proto_ops rfcomm_sock_ops = {
1088 .family = PF_BLUETOOTH, 1101 .family = PF_BLUETOOTH,
@@ -1122,8 +1135,12 @@ int __init rfcomm_init_sockets(void)
1122 if (err < 0) 1135 if (err < 0)
1123 goto error; 1136 goto error;
1124 1137
1125 if (class_create_file(bt_class, &class_attr_rfcomm) < 0) 1138 if (bt_debugfs) {
1126 BT_ERR("Failed to create RFCOMM info file"); 1139 rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
1140 bt_debugfs, NULL, &rfcomm_sock_debugfs_fops);
1141 if (!rfcomm_sock_debugfs)
1142 BT_ERR("Failed to create RFCOMM debug file");
1143 }
1127 1144
1128 BT_INFO("RFCOMM socket layer initialized"); 1145 BT_INFO("RFCOMM socket layer initialized");
1129 1146
@@ -1137,7 +1154,7 @@ error:
1137 1154
1138void rfcomm_cleanup_sockets(void) 1155void rfcomm_cleanup_sockets(void)
1139{ 1156{
1140 class_remove_file(bt_class, &class_attr_rfcomm); 1157 debugfs_remove(rfcomm_sock_debugfs);
1141 1158
1142 if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) 1159 if (bt_sock_unregister(BTPROTO_RFCOMM) < 0)
1143 BT_ERR("RFCOMM socket layer unregistration failed"); 1160 BT_ERR("RFCOMM socket layer unregistration failed");
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index f93b939539bc..ca6b2ad1c3fc 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -38,6 +38,8 @@
38#include <linux/socket.h> 38#include <linux/socket.h>
39#include <linux/skbuff.h> 39#include <linux/skbuff.h>
40#include <linux/device.h> 40#include <linux/device.h>
41#include <linux/debugfs.h>
42#include <linux/seq_file.h>
41#include <linux/list.h> 43#include <linux/list.h>
42#include <net/sock.h> 44#include <net/sock.h>
43 45
@@ -497,7 +499,8 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
497 499
498 BT_DBG("sk %p", sk); 500 BT_DBG("sk %p", sk);
499 501
500 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_sco)) 502 if (alen < sizeof(struct sockaddr_sco) ||
503 addr->sa_family != AF_BLUETOOTH)
501 return -EINVAL; 504 return -EINVAL;
502 505
503 if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) 506 if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
@@ -953,28 +956,36 @@ drop:
953 return 0; 956 return 0;
954} 957}
955 958
956static ssize_t sco_sysfs_show(struct class *dev, 959static int sco_debugfs_show(struct seq_file *f, void *p)
957 struct class_attribute *attr,
958 char *buf)
959{ 960{
960 struct sock *sk; 961 struct sock *sk;
961 struct hlist_node *node; 962 struct hlist_node *node;
962 char *str = buf;
963 963
964 read_lock_bh(&sco_sk_list.lock); 964 read_lock_bh(&sco_sk_list.lock);
965 965
966 sk_for_each(sk, node, &sco_sk_list.head) { 966 sk_for_each(sk, node, &sco_sk_list.head) {
967 str += sprintf(str, "%s %s %d\n", 967 seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
968 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 968 batostr(&bt_sk(sk)->dst), sk->sk_state);
969 sk->sk_state);
970 } 969 }
971 970
972 read_unlock_bh(&sco_sk_list.lock); 971 read_unlock_bh(&sco_sk_list.lock);
973 972
974 return (str - buf); 973 return 0;
975} 974}
976 975
977static CLASS_ATTR(sco, S_IRUGO, sco_sysfs_show, NULL); 976static int sco_debugfs_open(struct inode *inode, struct file *file)
977{
978 return single_open(file, sco_debugfs_show, inode->i_private);
979}
980
981static const struct file_operations sco_debugfs_fops = {
982 .open = sco_debugfs_open,
983 .read = seq_read,
984 .llseek = seq_lseek,
985 .release = single_release,
986};
987
988static struct dentry *sco_debugfs;
978 989
979static const struct proto_ops sco_sock_ops = { 990static const struct proto_ops sco_sock_ops = {
980 .family = PF_BLUETOOTH, 991 .family = PF_BLUETOOTH,
@@ -1032,8 +1043,12 @@ static int __init sco_init(void)
1032 goto error; 1043 goto error;
1033 } 1044 }
1034 1045
1035 if (class_create_file(bt_class, &class_attr_sco) < 0) 1046 if (bt_debugfs) {
1036 BT_ERR("Failed to create SCO info file"); 1047 sco_debugfs = debugfs_create_file("sco", 0444,
1048 bt_debugfs, NULL, &sco_debugfs_fops);
1049 if (!sco_debugfs)
1050 BT_ERR("Failed to create SCO debug file");
1051 }
1037 1052
1038 BT_INFO("SCO (Voice Link) ver %s", VERSION); 1053 BT_INFO("SCO (Voice Link) ver %s", VERSION);
1039 BT_INFO("SCO socket layer initialized"); 1054 BT_INFO("SCO socket layer initialized");
@@ -1047,7 +1062,7 @@ error:
1047 1062
1048static void __exit sco_exit(void) 1063static void __exit sco_exit(void)
1049{ 1064{
1050 class_remove_file(bt_class, &class_attr_sco); 1065 debugfs_remove(sco_debugfs);
1051 1066
1052 if (bt_sock_unregister(BTPROTO_SCO) < 0) 1067 if (bt_sock_unregister(BTPROTO_SCO) < 0)
1053 BT_ERR("SCO socket unregistration failed"); 1068 BT_ERR("SCO socket unregistration failed");
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 3b8e038ab32c..9101a4e56201 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -20,6 +20,7 @@
20#include <linux/etherdevice.h> 20#include <linux/etherdevice.h>
21#include <linux/jhash.h> 21#include <linux/jhash.h>
22#include <linux/random.h> 22#include <linux/random.h>
23#include <linux/slab.h>
23#include <asm/atomic.h> 24#include <asm/atomic.h>
24#include <asm/unaligned.h> 25#include <asm/unaligned.h>
25#include "br_private.h" 26#include "br_private.h"
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 15abef7349f3..92fb3293a215 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/slab.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
16#include <linux/netdevice.h> 17#include <linux/netdevice.h>
17#include <linux/skbuff.h> 18#include <linux/skbuff.h>
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index b7cdd2e98050..521439333316 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
21#include <linux/if_ether.h> 21#include <linux/if_ether.h>
22#include <linux/slab.h>
22#include <net/sock.h> 23#include <net/sock.h>
23 24
24#include "br_private.h" 25#include "br_private.h"
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index c9018fc72d24..d36e700f7a26 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -11,6 +11,7 @@
11 * 2 of the License, or (at your option) any later version. 11 * 2 of the License, or (at your option) any later version.
12 */ 12 */
13 13
14#include <linux/slab.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/netdevice.h> 16#include <linux/netdevice.h>
16#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 2af6e4a90262..995afc4b04dc 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -15,6 +15,7 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/if_bridge.h> 16#include <linux/if_bridge.h>
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/slab.h>
18#include <linux/times.h> 19#include <linux/times.h>
19#include <net/net_namespace.h> 20#include <net/net_namespace.h>
20#include <asm/uaccess.h> 21#include <asm/uaccess.h>
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index ed19b0a730ab..8ccdb8ee3928 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -723,7 +723,7 @@ static int br_multicast_igmp3_report(struct net_bridge *br,
723 if (!pskb_may_pull(skb, len)) 723 if (!pskb_may_pull(skb, len))
724 return -EINVAL; 724 return -EINVAL;
725 725
726 grec = (void *)(skb->data + len); 726 grec = (void *)(skb->data + len - sizeof(*grec));
727 group = grec->grec_mca; 727 group = grec->grec_mca;
728 type = grec->grec_type; 728 type = grec->grec_type;
729 729
@@ -1003,8 +1003,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1003 if (!pskb_may_pull(skb2, sizeof(*ih))) 1003 if (!pskb_may_pull(skb2, sizeof(*ih)))
1004 goto out; 1004 goto out;
1005 1005
1006 iph = ip_hdr(skb2);
1007
1008 switch (skb2->ip_summed) { 1006 switch (skb2->ip_summed) {
1009 case CHECKSUM_COMPLETE: 1007 case CHECKSUM_COMPLETE:
1010 if (!csum_fold(skb2->csum)) 1008 if (!csum_fold(skb2->csum))
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index b7e405dc9d1c..6b80ebc37667 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/slab.h>
19#include <linux/ip.h> 20#include <linux/ip.h>
20#include <linux/netdevice.h> 21#include <linux/netdevice.h>
21#include <linux/skbuff.h> 22#include <linux/skbuff.h>
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index fcffb3fb1177..aa56ac2c8829 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/slab.h>
14#include <net/rtnetlink.h> 15#include <net/rtnetlink.h>
15#include <net/net_namespace.h> 16#include <net/net_namespace.h>
16#include <net/sock.h> 17#include <net/sock.h>
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 763a3ec292e5..1413b72acc7f 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -82,6 +82,10 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
82 case NETDEV_UNREGISTER: 82 case NETDEV_UNREGISTER:
83 br_del_if(br, dev); 83 br_del_if(br, dev);
84 break; 84 break;
85
86 case NETDEV_PRE_TYPE_CHANGE:
87 /* Forbid underlaying device to change its type. */
88 return NOTIFY_BAD;
85 } 89 }
86 90
87 /* Events that may cause spanning tree to refresh */ 91 /* Events that may cause spanning tree to refresh */
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 11b0157f69c3..217bd225a42f 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -15,6 +15,7 @@
15#include <linux/netfilter_bridge.h> 15#include <linux/netfilter_bridge.h>
16#include <linux/etherdevice.h> 16#include <linux/etherdevice.h>
17#include <linux/llc.h> 17#include <linux/llc.h>
18#include <linux/slab.h>
18#include <net/net_namespace.h> 19#include <net/net_namespace.h>
19#include <net/llc.h> 20#include <net/llc.h>
20#include <net/llc_pdu.h> 21#include <net/llc_pdu.h>
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index f77b42d8e87d..852f37c27659 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -29,6 +29,7 @@
29 */ 29 */
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/slab.h>
32#include <linux/spinlock.h> 33#include <linux/spinlock.h>
33#include <linux/socket.h> 34#include <linux/socket.h>
34#include <linux/skbuff.h> 35#include <linux/skbuff.h>
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index c41f3fad0587..1d8c2c0a7470 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -22,6 +22,7 @@
22#include <linux/netfilter_bridge/ebtables.h> 22#include <linux/netfilter_bridge/ebtables.h>
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/mutex.h> 24#include <linux/mutex.h>
25#include <linux/slab.h>
25#include <asm/uaccess.h> 26#include <asm/uaccess.h>
26#include <linux/smp.h> 27#include <linux/smp.h>
27#include <linux/cpumask.h> 28#include <linux/cpumask.h>
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
new file mode 100644
index 000000000000..cd1daf6008bd
--- /dev/null
+++ b/net/caif/Kconfig
@@ -0,0 +1,48 @@
1#
2# CAIF net configurations
3#
4
5#menu "CAIF Support"
6comment "CAIF Support"
7menuconfig CAIF
8 tristate "Enable CAIF support"
9 select CRC_CCITT
10 default n
11 ---help---
12 The "Communication CPU to Application CPU Interface" (CAIF) is a packet
13 based connection-oriented MUX protocol developed by ST-Ericsson for use
14 with its modems. It is accessed from user space as sockets (PF_CAIF).
15
16 Say Y (or M) here if you build for a phone product (e.g. Android or
17	MeeGo) that uses CAIF as transport, if unsure say N.
18
19 If you select to build it as module then CAIF_NETDEV also needs to be
20 built as modules. You will also need to say yes to any CAIF physical
21 devices that your platform requires.
22
23 See Documentation/networking/caif for a further explanation on how to
24 use and configure CAIF.
25
26if CAIF
27
28config CAIF_DEBUG
29 bool "Enable Debug"
30 default n
31	--- help ---  # FIXME(review): invalid Kconfig syntax — the help marker must be "---help---" (no inner spaces), as used at lines 11 and 40 of this file; kconfig will not parse this entry's help text
32 Enable the inclusion of debug code in the CAIF stack.
33 Be aware that doing this will impact performance.
34 If unsure say N.
35
36
37config CAIF_NETDEV
38 tristate "CAIF GPRS Network device"
39 default CAIF
40 ---help---
41 Say Y if you will be using a CAIF based GPRS network device.
42 This can be either built-in or a loadable module,
43 If you select to build it as a built-in then the main CAIF device must
44 also be a built-in.
45 If unsure say Y.
46
47endif
48#endmenu
diff --git a/net/caif/Makefile b/net/caif/Makefile
new file mode 100644
index 000000000000..34852af2595e
--- /dev/null
+++ b/net/caif/Makefile
@@ -0,0 +1,26 @@
1ifeq ($(CONFIG_CAIF_DEBUG),y)
2CAIF_DBG_FLAGS := -DDEBUG
3endif
4
5ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS)
6
7caif-objs := caif_dev.o \
8 cfcnfg.o cfmuxl.o cfctrl.o \
9 cffrml.o cfveil.o cfdbgl.o\
10 cfserl.o cfdgml.o \
11 cfrfml.o cfvidl.o cfutill.o \
12 cfsrvl.o cfpkt_skbuff.o caif_config_util.o
13clean-dirs:= .tmp_versions
14
15clean-files:= \
16 Module.symvers \
17 modules.order \
18 *.cmd \
19 *.o \
20 *~
21
22obj-$(CONFIG_CAIF) += caif.o
23obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
24obj-$(CONFIG_CAIF) += caif_socket.o
25
26export-objs := caif.o
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
new file mode 100644
index 000000000000..6f36580366f0
--- /dev/null
+++ b/net/caif/caif_config_util.c
@@ -0,0 +1,87 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/module.h>
8#include <linux/spinlock.h>
9#include <net/caif/cfctrl.h>
10#include <net/caif/cfcnfg.h>
11#include <net/caif/caif_dev.h>
12
13int connect_req_to_link_param(struct cfcnfg *cnfg,
14 struct caif_connect_request *s,
15 struct cfctrl_link_param *l)
16{
17 struct dev_info *dev_info;
18 enum cfcnfg_phy_preference pref;
19 memset(l, 0, sizeof(*l));
20 l->priority = s->priority;
21
22 if (s->link_name[0] != '\0')
23 l->phyid = cfcnfg_get_named(cnfg, s->link_name);
24 else {
25 switch (s->link_selector) {
26 case CAIF_LINK_HIGH_BANDW:
27 pref = CFPHYPREF_HIGH_BW;
28 break;
29 case CAIF_LINK_LOW_LATENCY:
30 pref = CFPHYPREF_LOW_LAT;
31 break;
32 default:
33 return -EINVAL;
34 }
35 dev_info = cfcnfg_get_phyid(cnfg, pref);
36 if (dev_info == NULL)
37 return -ENODEV;
38 l->phyid = dev_info->id;
39 }
40 switch (s->protocol) {
41 case CAIFPROTO_AT:
42 l->linktype = CFCTRL_SRV_VEI;
43 if (s->sockaddr.u.at.type == CAIF_ATTYPE_PLAIN)
44 l->chtype = 0x02;
45 else
46 l->chtype = s->sockaddr.u.at.type;
47 l->endpoint = 0x00;
48 break;
49 case CAIFPROTO_DATAGRAM:
50 l->linktype = CFCTRL_SRV_DATAGRAM;
51 l->chtype = 0x00;
52 l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
53 break;
54 case CAIFPROTO_DATAGRAM_LOOP:
55 l->linktype = CFCTRL_SRV_DATAGRAM;
56 l->chtype = 0x03;
57 l->endpoint = 0x00;
58 l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
59 break;
60 case CAIFPROTO_RFM:
61 l->linktype = CFCTRL_SRV_RFM;
62 l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
63 strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
64 sizeof(l->u.rfm.volume)-1);
65 l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
66 break;
67 case CAIFPROTO_UTIL:
68 l->linktype = CFCTRL_SRV_UTIL;
69 l->endpoint = 0x00;
70 l->chtype = 0x00;
71 strncpy(l->u.utility.name, s->sockaddr.u.util.service,
72 sizeof(l->u.utility.name)-1);
73 l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
74 caif_assert(sizeof(l->u.utility.name) > 10);
75 l->u.utility.paramlen = s->param.size;
76 if (l->u.utility.paramlen > sizeof(l->u.utility.params))
77 l->u.utility.paramlen = sizeof(l->u.utility.params);
78
79 memcpy(l->u.utility.params, s->param.data,
80 l->u.utility.paramlen);
81
82 break;
83 default:
84 return -EINVAL;
85 }
86 return 0;
87}
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
new file mode 100644
index 000000000000..e84837e1bc86
--- /dev/null
+++ b/net/caif/caif_dev.c
@@ -0,0 +1,413 @@
1/*
2 * CAIF Interface registration.
3 * Copyright (C) ST-Ericsson AB 2010
4 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2
6 *
7 * Borrowed heavily from file: pn_dev.c. Thanks to
8 * Remi Denis-Courmont <remi.denis-courmont@nokia.com>
9 * and Sakari Ailus <sakari.ailus@nokia.com>
10 */
11
12#include <linux/version.h>
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/if_arp.h>
16#include <linux/net.h>
17#include <linux/netdevice.h>
18#include <linux/skbuff.h>
19#include <linux/sched.h>
20#include <linux/wait.h>
21#include <net/netns/generic.h>
22#include <net/net_namespace.h>
23#include <net/pkt_sched.h>
24#include <net/caif/caif_device.h>
25#include <net/caif/caif_dev.h>
26#include <net/caif/caif_layer.h>
27#include <net/caif/cfpkt.h>
28#include <net/caif/cfcnfg.h>
29
30MODULE_LICENSE("GPL");
31#define TIMEOUT (HZ*5)
32
33/* Used for local tracking of the CAIF net devices */
34struct caif_device_entry {
35 struct cflayer layer;
36 struct list_head list;
37 atomic_t in_use;
38 atomic_t state;
39 u16 phyid;
40 struct net_device *netdev;
41 wait_queue_head_t event;
42};
43
44struct caif_device_entry_list {
45 struct list_head list;
46 /* Protects simulanous deletes in list */
47 spinlock_t lock;
48};
49
50struct caif_net {
51 struct caif_device_entry_list caifdevs;
52};
53
54static int caif_net_id;
55static struct cfcnfg *cfg;
56
57static struct caif_device_entry_list *caif_device_list(struct net *net)
58{
59 struct caif_net *caifn;
60 BUG_ON(!net);
61 caifn = net_generic(net, caif_net_id);
62 BUG_ON(!caifn);
63 return &caifn->caifdevs;
64}
65
66/* Allocate new CAIF device. */
67static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
68{
69 struct caif_device_entry_list *caifdevs;
70 struct caif_device_entry *caifd;
71 caifdevs = caif_device_list(dev_net(dev));
72 BUG_ON(!caifdevs);
73 caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
74 if (!caifd)
75 return NULL;
76 caifd->netdev = dev;
77 list_add(&caifd->list, &caifdevs->list);
78 init_waitqueue_head(&caifd->event);
79 return caifd;
80}
81
82static struct caif_device_entry *caif_get(struct net_device *dev)
83{
84 struct caif_device_entry_list *caifdevs =
85 caif_device_list(dev_net(dev));
86 struct caif_device_entry *caifd;
87 BUG_ON(!caifdevs);
88 list_for_each_entry(caifd, &caifdevs->list, list) {
89 if (caifd->netdev == dev)
90 return caifd;
91 }
92 return NULL;
93}
94
95static void caif_device_destroy(struct net_device *dev)
96{
97 struct caif_device_entry_list *caifdevs =
98 caif_device_list(dev_net(dev));
99 struct caif_device_entry *caifd;
100 ASSERT_RTNL();
101 if (dev->type != ARPHRD_CAIF)
102 return;
103
104 spin_lock_bh(&caifdevs->lock);
105 caifd = caif_get(dev);
106 if (caifd == NULL) {
107 spin_unlock_bh(&caifdevs->lock);
108 return;
109 }
110
111 list_del(&caifd->list);
112 spin_unlock_bh(&caifdevs->lock);
113
114 kfree(caifd);
115 return;
116}
117
118static int transmit(struct cflayer *layer, struct cfpkt *pkt)
119{
120 struct caif_device_entry *caifd =
121 container_of(layer, struct caif_device_entry, layer);
122 struct sk_buff *skb, *skb2;
123 int ret = -EINVAL;
124 skb = cfpkt_tonative(pkt);
125 skb->dev = caifd->netdev;
126 /*
127 * Don't allow SKB to be destroyed upon error, but signal resend
128 * notification to clients. We can't rely on the return value as
129 * congestion (NET_XMIT_CN) sometimes drops the packet, sometimes don't.
130 */
131 if (netif_queue_stopped(caifd->netdev))
132 return -EAGAIN;
133 skb2 = skb_get(skb);
134
135 ret = dev_queue_xmit(skb2);
136
137 if (!ret)
138 kfree_skb(skb);
139 else
140 return -EAGAIN;
141
142 return 0;
143}
144
145static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
146{
147 struct caif_device_entry *caifd;
148 struct caif_dev_common *caifdev;
149 caifd = container_of(layr, struct caif_device_entry, layer);
150 caifdev = netdev_priv(caifd->netdev);
151 if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) {
152 atomic_set(&caifd->in_use, 1);
153 wake_up_interruptible(&caifd->event);
154
155 } else if (ctrl == _CAIF_MODEMCMD_PHYIF_USELESS) {
156 atomic_set(&caifd->in_use, 0);
157 wake_up_interruptible(&caifd->event);
158 }
159 return 0;
160}
161
162/*
163 * Stuff received packets to associated sockets.
164 * On error, returns non-zero and releases the skb.
165 */
166static int receive(struct sk_buff *skb, struct net_device *dev,
167 struct packet_type *pkttype, struct net_device *orig_dev)
168{
169 struct net *net;
170 struct cfpkt *pkt;
171 struct caif_device_entry *caifd;
172 net = dev_net(dev);
173 pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
174 caifd = caif_get(dev);
175 if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
176 return NET_RX_DROP;
177
178 if (caifd->layer.up->receive(caifd->layer.up, pkt))
179 return NET_RX_DROP;
180
181 return 0;
182}
183
/* Hook receive() into the RX path for ETH_P_CAIF frames. */
static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};
188
189static void dev_flowctrl(struct net_device *dev, int on)
190{
191 struct caif_device_entry *caifd = caif_get(dev);
192 if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
193 return;
194
195 caifd->layer.up->ctrlcmd(caifd->layer.up,
196 on ?
197 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
198 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
199 caifd->layer.id);
200}
201
202/* notify Caif of device events */
203static int caif_device_notify(struct notifier_block *me, unsigned long what,
204 void *arg)
205{
206 struct net_device *dev = arg;
207 struct caif_device_entry *caifd = NULL;
208 struct caif_dev_common *caifdev;
209 enum cfcnfg_phy_preference pref;
210 int res = -EINVAL;
211 enum cfcnfg_phy_type phy_type;
212
213 if (dev->type != ARPHRD_CAIF)
214 return 0;
215
216 switch (what) {
217 case NETDEV_REGISTER:
218 pr_info("CAIF: %s():register %s\n", __func__, dev->name);
219 caifd = caif_device_alloc(dev);
220 if (caifd == NULL)
221 break;
222 caifdev = netdev_priv(dev);
223 caifdev->flowctrl = dev_flowctrl;
224 atomic_set(&caifd->state, what);
225 res = 0;
226 break;
227
228 case NETDEV_UP:
229 pr_info("CAIF: %s(): up %s\n", __func__, dev->name);
230 caifd = caif_get(dev);
231 if (caifd == NULL)
232 break;
233 caifdev = netdev_priv(dev);
234 if (atomic_read(&caifd->state) == NETDEV_UP) {
235 pr_info("CAIF: %s():%s already up\n",
236 __func__, dev->name);
237 break;
238 }
239 atomic_set(&caifd->state, what);
240 caifd->layer.transmit = transmit;
241 caifd->layer.modemcmd = modemcmd;
242
243 if (caifdev->use_frag)
244 phy_type = CFPHYTYPE_FRAG;
245 else
246 phy_type = CFPHYTYPE_CAIF;
247
248 switch (caifdev->link_select) {
249 case CAIF_LINK_HIGH_BANDW:
250 pref = CFPHYPREF_LOW_LAT;
251 break;
252 case CAIF_LINK_LOW_LATENCY:
253 pref = CFPHYPREF_HIGH_BW;
254 break;
255 default:
256 pref = CFPHYPREF_HIGH_BW;
257 break;
258 }
259
260 cfcnfg_add_phy_layer(get_caif_conf(),
261 phy_type,
262 dev,
263 &caifd->layer,
264 &caifd->phyid,
265 pref,
266 caifdev->use_fcs,
267 caifdev->use_stx);
268 strncpy(caifd->layer.name, dev->name,
269 sizeof(caifd->layer.name) - 1);
270 caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
271 break;
272
273 case NETDEV_GOING_DOWN:
274 caifd = caif_get(dev);
275 if (caifd == NULL)
276 break;
277 pr_info("CAIF: %s():going down %s\n", __func__, dev->name);
278
279 if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN ||
280 atomic_read(&caifd->state) == NETDEV_DOWN)
281 break;
282
283 atomic_set(&caifd->state, what);
284 if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
285 return -EINVAL;
286 caifd->layer.up->ctrlcmd(caifd->layer.up,
287 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
288 caifd->layer.id);
289 res = wait_event_interruptible_timeout(caifd->event,
290 atomic_read(&caifd->in_use) == 0,
291 TIMEOUT);
292 break;
293
294 case NETDEV_DOWN:
295 caifd = caif_get(dev);
296 if (caifd == NULL)
297 break;
298 pr_info("CAIF: %s(): down %s\n", __func__, dev->name);
299 if (atomic_read(&caifd->in_use))
300 pr_warning("CAIF: %s(): "
301 "Unregistering an active CAIF device: %s\n",
302 __func__, dev->name);
303 cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
304 atomic_set(&caifd->state, what);
305 break;
306
307 case NETDEV_UNREGISTER:
308 caifd = caif_get(dev);
309 pr_info("CAIF: %s(): unregister %s\n", __func__, dev->name);
310 atomic_set(&caifd->state, what);
311 caif_device_destroy(dev);
312 break;
313 }
314 return 0;
315}
316
/* Subscribe caif_device_notify() to netdevice lifecycle events. */
static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};
321
322
/* Return the global CAIF configuration object created at module init. */
struct cfcnfg *get_caif_conf(void)
{
	return cfg;
}
EXPORT_SYMBOL(get_caif_conf);
328
329int caif_connect_client(struct caif_connect_request *conn_req,
330 struct cflayer *client_layer)
331{
332 struct cfctrl_link_param param;
333 if (connect_req_to_link_param(get_caif_conf(), conn_req, &param) == 0)
334 /* Hook up the adaptation layer. */
335 return cfcnfg_add_adaptation_layer(get_caif_conf(),
336 &param, client_layer);
337
338 return -EINVAL;
339
340 caif_assert(0);
341}
342EXPORT_SYMBOL(caif_connect_client);
343
/* Disconnect a client layer: remove its adaptation layer from the stack. */
int caif_disconnect_client(struct cflayer *adap_layer)
{
	return cfcnfg_del_adapt_layer(get_caif_conf(), adap_layer);
}
EXPORT_SYMBOL(caif_disconnect_client);
349
350/* Per-namespace Caif devices handling */
351static int caif_init_net(struct net *net)
352{
353 struct caif_net *caifn = net_generic(net, caif_net_id);
354 INIT_LIST_HEAD(&caifn->caifdevs.list);
355 spin_lock_init(&caifn->caifdevs.lock);
356 return 0;
357}
358
359static void caif_exit_net(struct net *net)
360{
361 struct net_device *dev;
362 int res;
363 rtnl_lock();
364 for_each_netdev(net, dev) {
365 if (dev->type != ARPHRD_CAIF)
366 continue;
367 res = dev_close(dev);
368 caif_device_destroy(dev);
369 }
370 rtnl_unlock();
371}
372
/* Per-network-namespace registration for the CAIF device list. */
static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id = &caif_net_id,
	.size = sizeof(struct caif_net),
};
379
380/* Initialize Caif devices list */
381static int __init caif_device_init(void)
382{
383 int result;
384 cfg = cfcnfg_create();
385 if (!cfg) {
386 pr_warning("CAIF: %s(): can't create cfcnfg.\n", __func__);
387 goto err_cfcnfg_create_failed;
388 }
389 result = register_pernet_device(&caif_net_ops);
390
391 if (result) {
392 kfree(cfg);
393 cfg = NULL;
394 return result;
395 }
396 dev_add_pack(&caif_packet_type);
397 register_netdevice_notifier(&caif_device_notifier);
398
399 return result;
400err_cfcnfg_create_failed:
401 return -ENODEV;
402}
403
/*
 * Module exit: detach from the network stack and free the global
 * configuration object.
 * NOTE(review): the netdevice notifier is unregistered after the
 * pernet ops — confirm no notification can race against the pernet
 * teardown in between.
 */
static void __exit caif_device_exit(void)
{
	dev_remove_pack(&caif_packet_type);
	unregister_pernet_device(&caif_net_ops);
	unregister_netdevice_notifier(&caif_device_notifier);
	cfcnfg_remove(cfg);
}
411
412module_init(caif_device_init);
413module_exit(caif_device_exit);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
new file mode 100644
index 000000000000..cdf62b9fefac
--- /dev/null
+++ b/net/caif/caif_socket.c
@@ -0,0 +1,1391 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland sjur.brandeland@stericsson.com
4 * Per Sigmond per.sigmond@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2
6 */
7
8#include <linux/fs.h>
9#include <linux/init.h>
10#include <linux/module.h>
11#include <linux/sched.h>
12#include <linux/spinlock.h>
13#include <linux/mutex.h>
14#include <linux/list.h>
15#include <linux/wait.h>
16#include <linux/poll.h>
17#include <linux/tcp.h>
18#include <linux/uaccess.h>
19#include <asm/atomic.h>
20
21#include <linux/caif/caif_socket.h>
22#include <net/caif/caif_layer.h>
23#include <net/caif/caif_dev.h>
24#include <net/caif/cfpkt.h>
25
26MODULE_LICENSE("GPL");
27
/* Read-queue lengths (in packets) at which RX flow control toggles. */
#define CHNL_SKT_READ_QUEUE_HIGH 200
#define CHNL_SKT_READ_QUEUE_LOW 100

/* Default socket buffer size.
 * NOTE(review): not referenced in this chunk — confirm users. */
static int caif_sockbuf_size = 40000;
/* Number of live AF_CAIF sockets. */
static atomic_t caif_nr_socks = ATOMIC_INIT(0);

/* Bit numbers within caifsock->conn_state. */
#define CONN_STATE_OPEN_BIT 1
#define CONN_STATE_PENDING_BIT 2
#define CONN_STATE_PEND_DESTROY_BIT 3
#define CONN_REMOTE_SHUTDOWN_BIT 4

/* Bit numbers within caifsock->flow_state. */
#define TX_FLOW_ON_BIT 1
#define RX_FLOW_ON_BIT 2

/* Atomic test helpers over the connection-state bits. */
#define STATE_IS_OPEN(cf_sk) test_bit(CONN_STATE_OPEN_BIT,\
	(void *) &(cf_sk)->conn_state)
#define STATE_IS_REMOTE_SHUTDOWN(cf_sk) test_bit(CONN_REMOTE_SHUTDOWN_BIT,\
	(void *) &(cf_sk)->conn_state)
#define STATE_IS_PENDING(cf_sk) test_bit(CONN_STATE_PENDING_BIT,\
	(void *) &(cf_sk)->conn_state)
#define STATE_IS_PENDING_DESTROY(cf_sk) test_bit(CONN_STATE_PEND_DESTROY_BIT,\
	(void *) &(cf_sk)->conn_state)

/* Atomic set/clear helpers over the connection-state bits. */
#define SET_STATE_PENDING_DESTROY(cf_sk) set_bit(CONN_STATE_PEND_DESTROY_BIT,\
	(void *) &(cf_sk)->conn_state)
#define SET_STATE_OPEN(cf_sk) set_bit(CONN_STATE_OPEN_BIT,\
	(void *) &(cf_sk)->conn_state)
#define SET_STATE_CLOSED(cf_sk) clear_bit(CONN_STATE_OPEN_BIT,\
	(void *) &(cf_sk)->conn_state)
#define SET_PENDING_ON(cf_sk) set_bit(CONN_STATE_PENDING_BIT,\
	(void *) &(cf_sk)->conn_state)
#define SET_PENDING_OFF(cf_sk) clear_bit(CONN_STATE_PENDING_BIT,\
	(void *) &(cf_sk)->conn_state)
#define SET_REMOTE_SHUTDOWN(cf_sk) set_bit(CONN_REMOTE_SHUTDOWN_BIT,\
	(void *) &(cf_sk)->conn_state)

#define SET_REMOTE_SHUTDOWN_OFF(dev) clear_bit(CONN_REMOTE_SHUTDOWN_BIT,\
	(void *) &(dev)->conn_state)

/* Atomic test/set/clear helpers over the flow-state bits. */
#define RX_FLOW_IS_ON(cf_sk) test_bit(RX_FLOW_ON_BIT,\
	(void *) &(cf_sk)->flow_state)
#define TX_FLOW_IS_ON(cf_sk) test_bit(TX_FLOW_ON_BIT,\
	(void *) &(cf_sk)->flow_state)

#define SET_RX_FLOW_OFF(cf_sk) clear_bit(RX_FLOW_ON_BIT,\
	(void *) &(cf_sk)->flow_state)
#define SET_RX_FLOW_ON(cf_sk) set_bit(RX_FLOW_ON_BIT,\
	(void *) &(cf_sk)->flow_state)
#define SET_TX_FLOW_OFF(cf_sk) clear_bit(TX_FLOW_ON_BIT,\
	(void *) &(cf_sk)->flow_state)
#define SET_TX_FLOW_ON(cf_sk) set_bit(TX_FLOW_ON_BIT,\
	(void *) &(cf_sk)->flow_state)

/* File-mode flags stored in caifsock->file_mode. */
#define SKT_READ_FLAG 0x01
#define SKT_WRITE_FLAG 0x02
/* Root debugfs directory for caif_socket. */
static struct dentry *debugfsdir;
/* NOTE(review): include placed after code; normally belongs with the
 * other includes at the top of the file. */
#include <linux/debugfs.h>

#ifdef CONFIG_DEBUG_FS
/* Event counters exported via debugfs (compiled out otherwise). */
struct debug_fs_counter {
	atomic_t num_open;
	atomic_t num_close;
	atomic_t num_init;
	atomic_t num_init_resp;
	atomic_t num_init_fail_resp;
	atomic_t num_deinit;
	atomic_t num_deinit_resp;
	atomic_t num_remote_shutdown_ind;
	atomic_t num_tx_flow_off_ind;
	atomic_t num_tx_flow_on_ind;
	atomic_t num_rx_flow_off;
	atomic_t num_rx_flow_on;
	atomic_t skb_in_use;
	atomic_t skb_alloc;
	atomic_t skb_free;
};
static struct debug_fs_counter cnt;
/* Counter helpers; expand to nothing when CONFIG_DEBUG_FS is off. */
#define dbfs_atomic_inc(v) atomic_inc(v)
#define dbfs_atomic_dec(v) atomic_dec(v)
#else
#define dbfs_atomic_inc(v)
#define dbfs_atomic_dec(v)
#endif
110
/* The AF_CAIF socket */
struct caifsock {
	/* NOTE: sk has to be the first member */
	struct sock sk;
	struct cflayer layer;		/* this socket's layer in the CAIF stack */
	char name[CAIF_LAYER_NAME_SZ];	/* layer name, used for debugging */
	u32 conn_state;			/* CONN_STATE_* bits (see macros above) */
	u32 flow_state;			/* TX/RX_FLOW_ON_BIT bits */
	struct cfpktq *pktq;		/* receive queue; has its own lock */
	int file_mode;			/* SKT_READ_FLAG | SKT_WRITE_FLAG */
	struct caif_connect_request conn_req;	/* parameters used by connect() */
	int read_queue_len;		/* number of packets queued in pktq */
	/* protect updates of read_queue_len */
	spinlock_t read_queue_len_lock;
	struct dentry *debugfs_socket_dir;	/* per-socket debugfs directory */
};

static void drain_queue(struct caifsock *cf_sk);
129
130/* Packet Receive Callback function called from CAIF Stack */
131static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
132{
133 struct caifsock *cf_sk;
134 int read_queue_high;
135 cf_sk = container_of(layr, struct caifsock, layer);
136
137 if (!STATE_IS_OPEN(cf_sk)) {
138 /*FIXME: This should be allowed finally!*/
139 pr_debug("CAIF: %s(): called after close request\n", __func__);
140 cfpkt_destroy(pkt);
141 return 0;
142 }
143 /* NOTE: This function may be called in Tasklet context! */
144
145 /* The queue has its own lock */
146 cfpkt_queue(cf_sk->pktq, pkt, 0);
147
148 spin_lock(&cf_sk->read_queue_len_lock);
149 cf_sk->read_queue_len++;
150
151 read_queue_high = (cf_sk->read_queue_len > CHNL_SKT_READ_QUEUE_HIGH);
152 spin_unlock(&cf_sk->read_queue_len_lock);
153
154 if (RX_FLOW_IS_ON(cf_sk) && read_queue_high) {
155 dbfs_atomic_inc(&cnt.num_rx_flow_off);
156 SET_RX_FLOW_OFF(cf_sk);
157
158 /* Send flow off (NOTE: must not sleep) */
159 pr_debug("CAIF: %s():"
160 " sending flow OFF (queue len = %d)\n",
161 __func__,
162 cf_sk->read_queue_len);
163 caif_assert(cf_sk->layer.dn);
164 caif_assert(cf_sk->layer.dn->ctrlcmd);
165
166 (void) cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
167 CAIF_MODEMCMD_FLOW_OFF_REQ);
168 }
169
170 /* Signal reader that data is available. */
171
172 wake_up_interruptible(cf_sk->sk.sk_sleep);
173
174 return 0;
175}
176
/* Packet Flow Control Callback function called from CAIF */
/*
 * Control/flow indication from the CAIF stack for this socket:
 * updates the socket's state bits and wakes any blocked reader/writer.
 * May run in tasklet context — must not sleep.
 */
static void caif_sktflowctrl_cb(struct cflayer *layr,
				enum caif_ctrlcmd flow,
				int phyid)
{
	struct caifsock *cf_sk;

	/* NOTE: This function may be called in Tasklet context! */
	pr_debug("CAIF: %s(): flowctrl func called: %s.\n",
		 __func__,
		 flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" :
		 flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" :
		 flow == CAIF_CTRLCMD_INIT_RSP ? "INIT_RSP" :
		 flow == CAIF_CTRLCMD_DEINIT_RSP ? "DEINIT_RSP" :
		 flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "INIT_FAIL_RSP" :
		 flow ==
		 CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? "REMOTE_SHUTDOWN" :
		 "UKNOWN CTRL COMMAND");

	if (layr == NULL)
		return;

	cf_sk = container_of(layr, struct caifsock, layer);

	switch (flow) {
	case CAIF_CTRLCMD_FLOW_ON_IND:
		/* Modem ready for more data: open the TX gate. */
		dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
		/* Signal reader that data is available. */
		SET_TX_FLOW_ON(cf_sk);
		wake_up_interruptible(cf_sk->sk.sk_sleep);
		break;

	case CAIF_CTRLCMD_FLOW_OFF_IND:
		/* Modem congested: writers will block or get -EAGAIN. */
		dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
		SET_TX_FLOW_OFF(cf_sk);
		break;

	case CAIF_CTRLCMD_INIT_RSP:
		/* Link setup completed: leave PENDING, enable TX. */
		dbfs_atomic_inc(&cnt.num_init_resp);
		/* Signal reader that data is available. */
		caif_assert(STATE_IS_OPEN(cf_sk));
		SET_PENDING_OFF(cf_sk);
		SET_TX_FLOW_ON(cf_sk);
		wake_up_interruptible(cf_sk->sk.sk_sleep);
		break;

	case CAIF_CTRLCMD_DEINIT_RSP:
		/* Link teardown complete: drop the stack's socket ref. */
		dbfs_atomic_inc(&cnt.num_deinit_resp);
		caif_assert(!STATE_IS_OPEN(cf_sk));
		SET_PENDING_OFF(cf_sk);
		if (!STATE_IS_PENDING_DESTROY(cf_sk)) {
			if (cf_sk->sk.sk_sleep != NULL)
				wake_up_interruptible(cf_sk->sk.sk_sleep);
		}
		dbfs_atomic_inc(&cnt.num_deinit);
		sock_put(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_INIT_FAIL_RSP:
		/* Lower layers refused the link: back to closed. */
		dbfs_atomic_inc(&cnt.num_init_fail_resp);
		caif_assert(STATE_IS_OPEN(cf_sk));
		SET_STATE_CLOSED(cf_sk);
		SET_PENDING_OFF(cf_sk);
		SET_TX_FLOW_OFF(cf_sk);
		wake_up_interruptible(cf_sk->sk.sk_sleep);
		break;

	case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
		dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
		SET_REMOTE_SHUTDOWN(cf_sk);
		/* Use sk_shutdown to indicate remote shutdown indication */
		cf_sk->sk.sk_shutdown |= RCV_SHUTDOWN;
		cf_sk->file_mode = 0;
		wake_up_interruptible(cf_sk->sk.sk_sleep);
		break;

	default:
		pr_debug("CAIF: %s(): Unexpected flow command %d\n",
			 __func__, flow);
	}
}
258
259static void skb_destructor(struct sk_buff *skb)
260{
261 dbfs_atomic_inc(&cnt.skb_free);
262 dbfs_atomic_dec(&cnt.skb_in_use);
263}
264
265
/*
 * Receive one CAIF packet into the user's iovec.
 *
 * Blocks until data arrives unless MSG_DONTWAIT is set; also waits out
 * a pending open.  For SOCK_SEQPACKET a too-small buffer yields
 * -EMSGSIZE; for other socket types the read is truncated to buf_len
 * and the remainder stays available (skb_pull below).
 * Returns the number of bytes copied, or a negative errno.
 */
static int caif_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *m, size_t buf_len, int flags)

{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	struct cfpkt *pkt = NULL;
	size_t len;
	int result;
	struct sk_buff *skb;
	ssize_t ret = -EIO;
	int read_queue_low;

	if (cf_sk == NULL) {
		pr_debug("CAIF: %s(): private_data not set!\n",
			 __func__);
		ret = -EBADFD;
		/* NOTE(review): this jump releases a sock lock that was
		 * never taken (and cf_sk is NULL here) — confirm. */
		goto read_error;
	}

	/* Don't do multiple iovec entries yet */
	if (m->msg_iovlen != 1)
		return -EOPNOTSUPP;

	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(&(cf_sk->sk));

	caif_assert(cf_sk->pktq);

	if (!STATE_IS_OPEN(cf_sk)) {
		/* Socket is closed or closing. */
		if (!STATE_IS_PENDING(cf_sk)) {
			pr_debug("CAIF: %s(): socket is closed (by remote)\n",
				 __func__);
			ret = -EPIPE;
		} else {
			pr_debug("CAIF: %s(): socket is closing..\n", __func__);
			ret = -EBADF;
		}
		goto read_error;
	}
	/* Socket is open or opening. */
	if (STATE_IS_PENDING(cf_sk)) {
		pr_debug("CAIF: %s(): socket is opening...\n", __func__);

		if (flags & MSG_DONTWAIT) {
			/* We can't block. */
			pr_debug("CAIF: %s():state pending and MSG_DONTWAIT\n",
				 __func__);
			ret = -EAGAIN;
			goto read_error;
		}

		/*
		 * Blocking mode; state is pending and we need to wait
		 * for its conclusion.
		 */
		release_sock(&cf_sk->sk);

		result =
		    wait_event_interruptible(*cf_sk->sk.sk_sleep,
					     !STATE_IS_PENDING(cf_sk));

		lock_sock(&(cf_sk->sk));

		if (result == -ERESTARTSYS) {
			pr_debug("CAIF: %s(): wait_event_interruptible"
				 " woken by a signal (1)", __func__);
			ret = -ERESTARTSYS;
			goto read_error;
		}
	}

	if (STATE_IS_REMOTE_SHUTDOWN(cf_sk) ||
	    !STATE_IS_OPEN(cf_sk) ||
	    STATE_IS_PENDING(cf_sk)) {

		pr_debug("CAIF: %s(): socket closed\n",
			 __func__);
		ret = -ESHUTDOWN;
		goto read_error;
	}

	/*
	 * Block if we don't have any received buffers.
	 * The queue has its own lock.
	 */
	while ((pkt = cfpkt_qpeek(cf_sk->pktq)) == NULL) {

		if (flags & MSG_DONTWAIT) {
			pr_debug("CAIF: %s(): MSG_DONTWAIT\n", __func__);
			ret = -EAGAIN;
			goto read_error;
		}
		trace_printk("CAIF: %s() wait_event\n", __func__);

		/* Let writers in. */
		release_sock(&cf_sk->sk);

		/* Block reader until data arrives or socket is closed. */
		if (wait_event_interruptible(*cf_sk->sk.sk_sleep,
					     cfpkt_qpeek(cf_sk->pktq)
					     || STATE_IS_REMOTE_SHUTDOWN(cf_sk)
					     || !STATE_IS_OPEN(cf_sk)) ==
		    -ERESTARTSYS) {
			/* Lock already released — plain return is correct. */
			pr_debug("CAIF: %s():"
				 " wait_event_interruptible woken by "
				 "a signal, signal_pending(current) = %d\n",
				 __func__,
				 signal_pending(current));
			return -ERESTARTSYS;
		}

		trace_printk("CAIF: %s() awake\n", __func__);
		if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) {
			pr_debug("CAIF: %s(): "
				 "received remote_shutdown indication\n",
				 __func__);
			ret = -ESHUTDOWN;
			goto read_error_no_unlock;
		}

		/* I want to be alone on cf_sk (except status and queue). */
		lock_sock(&(cf_sk->sk));

		if (!STATE_IS_OPEN(cf_sk)) {
			/* Someone closed the link, report error. */
			pr_debug("CAIF: %s(): remote end shutdown!\n",
				 __func__);
			ret = -EPIPE;
			goto read_error;
		}
	}

	/* The queue has its own lock. */
	len = cfpkt_getlen(pkt);

	/* Check max length that can be copied. */
	if (len <= buf_len)
		pkt = cfpkt_dequeue(cf_sk->pktq);
	else {
		pr_debug("CAIF: %s(): user buffer too small (%ld,%ld)\n",
			 __func__, (long) len, (long) buf_len);
		if (sock->type == SOCK_SEQPACKET) {
			ret = -EMSGSIZE;
			goto read_error;
		}
		/* NOTE(review): on this truncation path the packet is NOT
		 * dequeued, yet read_queue_len is decremented below —
		 * confirm the counter stays consistent. */
		len = buf_len;
	}


	spin_lock(&cf_sk->read_queue_len_lock);
	cf_sk->read_queue_len--;
	read_queue_low = (cf_sk->read_queue_len < CHNL_SKT_READ_QUEUE_LOW);
	spin_unlock(&cf_sk->read_queue_len_lock);

	if (!RX_FLOW_IS_ON(cf_sk) && read_queue_low) {
		dbfs_atomic_inc(&cnt.num_rx_flow_on);
		SET_RX_FLOW_ON(cf_sk);

		/* Send flow on. */
		pr_debug("CAIF: %s(): sending flow ON (queue len = %d)\n",
			 __func__, cf_sk->read_queue_len);
		caif_assert(cf_sk->layer.dn);
		/* NOTE(review): asserts ->ctrlcmd but invokes ->modemcmd
		 * below — confirm which member is meant. */
		caif_assert(cf_sk->layer.dn->ctrlcmd);
		(void) cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
						 CAIF_MODEMCMD_FLOW_ON_REQ);

		caif_assert(cf_sk->read_queue_len >= 0);
	}

	skb = cfpkt_tonative(pkt);
	result = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len);
	skb_pull(skb, len);

	if (result) {
		pr_debug("CAIF: %s(): copy to_iovec failed\n", __func__);
		cfpkt_destroy(pkt);
		ret = -EFAULT;
		goto read_error;
	}

	/* Free packet and remove from queue */
	if (skb->len == 0)
		skb_free_datagram(sk, skb);

	/* Let the others in. */
	release_sock(&cf_sk->sk);
	return len;

read_error:
	release_sock(&cf_sk->sk);
read_error_no_unlock:
	return ret;
}
463
464/* Send a signal as a consequence of sendmsg, sendto or caif_sendmsg. */
465static int caif_sendmsg(struct kiocb *kiocb, struct socket *sock,
466 struct msghdr *msg, size_t len)
467{
468
469 struct sock *sk = sock->sk;
470 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
471 size_t payload_size = msg->msg_iov->iov_len;
472 struct cfpkt *pkt = NULL;
473 struct caif_payload_info info;
474 unsigned char *txbuf;
475 ssize_t ret = -EIO;
476 int result;
477 struct sk_buff *skb;
478 caif_assert(msg->msg_iovlen == 1);
479
480 if (cf_sk == NULL) {
481 pr_debug("CAIF: %s(): private_data not set!\n",
482 __func__);
483 ret = -EBADFD;
484 goto write_error_no_unlock;
485 }
486
487 if (unlikely(msg->msg_iov->iov_base == NULL)) {
488 pr_warning("CAIF: %s(): Buffer is NULL.\n", __func__);
489 ret = -EINVAL;
490 goto write_error_no_unlock;
491 }
492
493 if (payload_size > CAIF_MAX_PAYLOAD_SIZE) {
494 pr_debug("CAIF: %s(): buffer too long\n", __func__);
495 if (sock->type == SOCK_SEQPACKET) {
496 ret = -EINVAL;
497 goto write_error_no_unlock;
498 }
499 payload_size = CAIF_MAX_PAYLOAD_SIZE;
500 }
501
502 /* I want to be alone on cf_sk (except status and queue) */
503 lock_sock(&(cf_sk->sk));
504
505 caif_assert(cf_sk->pktq);
506
507 if (!STATE_IS_OPEN(cf_sk)) {
508 /* Socket is closed or closing */
509 if (!STATE_IS_PENDING(cf_sk)) {
510 pr_debug("CAIF: %s(): socket is closed (by remote)\n",
511 __func__);
512 ret = -EPIPE;
513 } else {
514 pr_debug("CAIF: %s(): socket is closing...\n",
515 __func__);
516 ret = -EBADF;
517 }
518 goto write_error;
519 }
520
521 /* Socket is open or opening */
522 if (STATE_IS_PENDING(cf_sk)) {
523 pr_debug("CAIF: %s(): socket is opening...\n", __func__);
524
525 if (msg->msg_flags & MSG_DONTWAIT) {
526 /* We can't block */
527 trace_printk("CAIF: %s():state pending:"
528 "state=MSG_DONTWAIT\n", __func__);
529 ret = -EAGAIN;
530 goto write_error;
531 }
532 /* Let readers in */
533 release_sock(&cf_sk->sk);
534
535 /*
536 * Blocking mode; state is pending and we need to wait
537 * for its conclusion.
538 */
539 result =
540 wait_event_interruptible(*cf_sk->sk.sk_sleep,
541 !STATE_IS_PENDING(cf_sk));
542 /* I want to be alone on cf_sk (except status and queue) */
543 lock_sock(&(cf_sk->sk));
544
545 if (result == -ERESTARTSYS) {
546 pr_debug("CAIF: %s(): wait_event_interruptible"
547 " woken by a signal (1)", __func__);
548 ret = -ERESTARTSYS;
549 goto write_error;
550 }
551 }
552 if (STATE_IS_REMOTE_SHUTDOWN(cf_sk) ||
553 !STATE_IS_OPEN(cf_sk) ||
554 STATE_IS_PENDING(cf_sk)) {
555
556 pr_debug("CAIF: %s(): socket closed\n",
557 __func__);
558 ret = -ESHUTDOWN;
559 goto write_error;
560 }
561
562 if (!TX_FLOW_IS_ON(cf_sk)) {
563
564 /* Flow is off. Check non-block flag */
565 if (msg->msg_flags & MSG_DONTWAIT) {
566 trace_printk("CAIF: %s(): MSG_DONTWAIT and tx flow off",
567 __func__);
568 ret = -EAGAIN;
569 goto write_error;
570 }
571
572 /* release lock before waiting */
573 release_sock(&cf_sk->sk);
574
575 /* Wait until flow is on or socket is closed */
576 if (wait_event_interruptible(*cf_sk->sk.sk_sleep,
577 TX_FLOW_IS_ON(cf_sk)
578 || !STATE_IS_OPEN(cf_sk)
579 || STATE_IS_REMOTE_SHUTDOWN(cf_sk)
580 ) == -ERESTARTSYS) {
581 pr_debug("CAIF: %s():"
582 " wait_event_interruptible woken by a signal",
583 __func__);
584 ret = -ERESTARTSYS;
585 goto write_error_no_unlock;
586 }
587
588 /* I want to be alone on cf_sk (except status and queue) */
589 lock_sock(&(cf_sk->sk));
590
591 if (!STATE_IS_OPEN(cf_sk)) {
592 /* someone closed the link, report error */
593 pr_debug("CAIF: %s(): remote end shutdown!\n",
594 __func__);
595 ret = -EPIPE;
596 goto write_error;
597 }
598
599 if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) {
600 pr_debug("CAIF: %s(): "
601 "received remote_shutdown indication\n",
602 __func__);
603 ret = -ESHUTDOWN;
604 goto write_error;
605 }
606 }
607
608 pkt = cfpkt_create(payload_size);
609 skb = (struct sk_buff *)pkt;
610 skb->destructor = skb_destructor;
611 skb->sk = sk;
612 dbfs_atomic_inc(&cnt.skb_alloc);
613 dbfs_atomic_inc(&cnt.skb_in_use);
614 if (cfpkt_raw_append(pkt, (void **) &txbuf, payload_size) < 0) {
615 pr_debug("CAIF: %s(): cfpkt_raw_append failed\n", __func__);
616 cfpkt_destroy(pkt);
617 ret = -EINVAL;
618 goto write_error;
619 }
620
621 /* Copy data into buffer. */
622 if (copy_from_user(txbuf, msg->msg_iov->iov_base, payload_size)) {
623 pr_debug("CAIF: %s(): copy_from_user returned non zero.\n",
624 __func__);
625 cfpkt_destroy(pkt);
626 ret = -EINVAL;
627 goto write_error;
628 }
629 memset(&info, 0, sizeof(info));
630
631 /* Send the packet down the stack. */
632 caif_assert(cf_sk->layer.dn);
633 caif_assert(cf_sk->layer.dn->transmit);
634
635 do {
636 ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
637
638 if (likely((ret >= 0) || (ret != -EAGAIN)))
639 break;
640
641 /* EAGAIN - retry */
642 if (msg->msg_flags & MSG_DONTWAIT) {
643 pr_debug("CAIF: %s(): NONBLOCK and transmit failed,"
644 " error = %ld\n", __func__, (long) ret);
645 ret = -EAGAIN;
646 goto write_error;
647 }
648
649 /* Let readers in */
650 release_sock(&cf_sk->sk);
651
652 /* Wait until flow is on or socket is closed */
653 if (wait_event_interruptible(*cf_sk->sk.sk_sleep,
654 TX_FLOW_IS_ON(cf_sk)
655 || !STATE_IS_OPEN(cf_sk)
656 || STATE_IS_REMOTE_SHUTDOWN(cf_sk)
657 ) == -ERESTARTSYS) {
658 pr_debug("CAIF: %s(): wait_event_interruptible"
659 " woken by a signal", __func__);
660 ret = -ERESTARTSYS;
661 goto write_error_no_unlock;
662 }
663
664 /* I want to be alone on cf_sk (except status and queue) */
665 lock_sock(&(cf_sk->sk));
666
667 } while (ret == -EAGAIN);
668
669 if (ret < 0) {
670 cfpkt_destroy(pkt);
671 pr_debug("CAIF: %s(): transmit failed, error = %ld\n",
672 __func__, (long) ret);
673
674 goto write_error;
675 }
676
677 release_sock(&cf_sk->sk);
678 return payload_size;
679
680write_error:
681 release_sock(&cf_sk->sk);
682write_error_no_unlock:
683 return ret;
684}
685
686static unsigned int caif_poll(struct file *file, struct socket *sock,
687 poll_table *wait)
688{
689 struct sock *sk = sock->sk;
690 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
691 u32 mask = 0;
692 poll_wait(file, sk->sk_sleep, wait);
693 lock_sock(&(cf_sk->sk));
694 if (!STATE_IS_OPEN(cf_sk)) {
695 if (!STATE_IS_PENDING(cf_sk))
696 mask |= POLLHUP;
697 } else {
698 if (cfpkt_qpeek(cf_sk->pktq) != NULL)
699 mask |= (POLLIN | POLLRDNORM);
700 if (TX_FLOW_IS_ON(cf_sk))
701 mask |= (POLLOUT | POLLWRNORM);
702 }
703 release_sock(&cf_sk->sk);
704 trace_printk("CAIF: %s(): poll mask=0x%04x\n",
705 __func__, mask);
706 return mask;
707}
708
709static void drain_queue(struct caifsock *cf_sk)
710{
711 struct cfpkt *pkt = NULL;
712
713 /* Empty the queue */
714 do {
715 /* The queue has its own lock */
716 if (!cf_sk->pktq)
717 break;
718
719 pkt = cfpkt_dequeue(cf_sk->pktq);
720 if (!pkt)
721 break;
722 pr_debug("CAIF: %s(): freeing packet from read queue\n",
723 __func__);
724 cfpkt_destroy(pkt);
725
726 } while (1);
727
728 cf_sk->read_queue_len = 0;
729}
730
731static int setsockopt(struct socket *sock,
732 int lvl, int opt, char __user *ov, unsigned int ol)
733{
734 struct sock *sk = sock->sk;
735 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
736 int prio, linksel;
737 struct ifreq ifreq;
738
739 if (STATE_IS_OPEN(cf_sk)) {
740 pr_debug("CAIF: %s(): setsockopt "
741 "cannot be done on a connected socket\n",
742 __func__);
743 return -ENOPROTOOPT;
744 }
745 switch (opt) {
746 case CAIFSO_LINK_SELECT:
747 if (ol < sizeof(int)) {
748 pr_debug("CAIF: %s(): setsockopt"
749 " CAIFSO_CHANNEL_CONFIG bad size\n", __func__);
750 return -EINVAL;
751 }
752 if (lvl != SOL_CAIF)
753 goto bad_sol;
754 if (copy_from_user(&linksel, ov, sizeof(int)))
755 return -EINVAL;
756 lock_sock(&(cf_sk->sk));
757 cf_sk->conn_req.link_selector = linksel;
758 release_sock(&cf_sk->sk);
759 return 0;
760
761 case SO_PRIORITY:
762 if (lvl != SOL_SOCKET)
763 goto bad_sol;
764 if (ol < sizeof(int)) {
765 pr_debug("CAIF: %s(): setsockopt"
766 " SO_PRIORITY bad size\n", __func__);
767 return -EINVAL;
768 }
769 if (copy_from_user(&prio, ov, sizeof(int)))
770 return -EINVAL;
771 lock_sock(&(cf_sk->sk));
772 cf_sk->conn_req.priority = prio;
773 pr_debug("CAIF: %s(): Setting sockopt priority=%d\n", __func__,
774 cf_sk->conn_req.priority);
775 release_sock(&cf_sk->sk);
776 return 0;
777
778 case SO_BINDTODEVICE:
779 if (lvl != SOL_SOCKET)
780 goto bad_sol;
781 if (ol < sizeof(struct ifreq)) {
782 pr_debug("CAIF: %s(): setsockopt"
783 " SO_PRIORITY bad size\n", __func__);
784 return -EINVAL;
785 }
786 if (copy_from_user(&ifreq, ov, sizeof(ifreq)))
787 return -EFAULT;
788 lock_sock(&(cf_sk->sk));
789 strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name,
790 sizeof(cf_sk->conn_req.link_name));
791 cf_sk->conn_req.link_name
792 [sizeof(cf_sk->conn_req.link_name)-1] = 0;
793 release_sock(&cf_sk->sk);
794 return 0;
795
796 case CAIFSO_REQ_PARAM:
797 if (lvl != SOL_CAIF)
798 goto bad_sol;
799 if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
800 return -ENOPROTOOPT;
801 if (ol > sizeof(cf_sk->conn_req.param.data))
802 goto req_param_bad_size;
803
804 lock_sock(&(cf_sk->sk));
805 cf_sk->conn_req.param.size = ol;
806 if (copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
807 release_sock(&cf_sk->sk);
808req_param_bad_size:
809 pr_debug("CAIF: %s(): setsockopt"
810 " CAIFSO_CHANNEL_CONFIG bad size\n", __func__);
811 return -EINVAL;
812 }
813
814 release_sock(&cf_sk->sk);
815 return 0;
816
817 default:
818 pr_debug("CAIF: %s(): unhandled option %d\n", __func__, opt);
819 return -EINVAL;
820 }
821
822 return 0;
823bad_sol:
824 pr_debug("CAIF: %s(): setsockopt bad level\n", __func__);
825 return -ENOPROTOOPT;
826
827}
828
/*
 * Connect the socket to the CAIF address in 'uservaddr'.
 *
 * If a previous close is still pending, waits for its completion
 * first (or returns -EAGAIN with O_NONBLOCK).  On a fresh socket it
 * registers the channel with the stack, then blocks until the link
 * setup concludes; with O_NONBLOCK it returns -EINPROGRESS instead.
 * Only SOCK_SEQPACKET and SOCK_STREAM are accepted.
 */
static int caif_connect(struct socket *sock, struct sockaddr *uservaddr,
			int sockaddr_len, int flags)
{
	struct caifsock *cf_sk = NULL;
	int result = -1;
	int mode = 0;
	int ret = -EIO;
	struct sock *sk = sock->sk;
	BUG_ON(sk == NULL);

	cf_sk = container_of(sk, struct caifsock, sk);

	trace_printk("CAIF: %s(): cf_sk=%p OPEN=%d, TX_FLOW=%d, RX_FLOW=%d\n",
		     __func__, cf_sk,
		     STATE_IS_OPEN(cf_sk),
		     TX_FLOW_IS_ON(cf_sk), RX_FLOW_IS_ON(cf_sk));


	if (sock->type == SOCK_SEQPACKET || sock->type == SOCK_STREAM)
		sock->state = SS_CONNECTING;
	else
		goto out;

	/* I want to be alone on cf_sk (except status and queue) */
	lock_sock(&(cf_sk->sk));

	if (sockaddr_len != sizeof(struct sockaddr_caif)) {
		pr_debug("CAIF: %s(): Bad address len (%ld,%lu)\n",
			 __func__, (long) sockaddr_len,
			 (long unsigned) sizeof(struct sockaddr_caif));
		ret = -EINVAL;
		goto open_error;
	}

	if (uservaddr->sa_family != AF_CAIF) {
		pr_debug("CAIF: %s(): Bad address family (%d)\n",
			 __func__, uservaddr->sa_family);
		ret = -EAFNOSUPPORT;
		goto open_error;
	}

	memcpy(&cf_sk->conn_req.sockaddr, uservaddr,
	       sizeof(struct sockaddr_caif));

	dbfs_atomic_inc(&cnt.num_open);
	mode = SKT_READ_FLAG | SKT_WRITE_FLAG;

	/* If socket is not open, make sure socket is in fully closed state */
	if (!STATE_IS_OPEN(cf_sk)) {
		/* Has link close response been received (if we ever sent it)?*/
		if (STATE_IS_PENDING(cf_sk)) {
			/*
			 * Still waiting for close response from remote.
			 * If opened non-blocking, report "would block"
			 */
			if (flags & O_NONBLOCK) {
				pr_debug("CAIF: %s(): O_NONBLOCK"
					 " && close pending\n", __func__);
				ret = -EAGAIN;
				goto open_error;
			}

			pr_debug("CAIF: %s(): Wait for close response"
				 " from remote...\n", __func__);

			release_sock(&cf_sk->sk);

			/*
			 * Blocking mode; close is pending and we need to wait
			 * for its conclusion.
			 */
			result =
			    wait_event_interruptible(*cf_sk->sk.sk_sleep,
						     !STATE_IS_PENDING(cf_sk));

			lock_sock(&(cf_sk->sk));
			if (result == -ERESTARTSYS) {
				pr_debug("CAIF: %s(): wait_event_interruptible"
					 "woken by a signal (1)", __func__);
				ret = -ERESTARTSYS;
				goto open_error;
			}
		}
	}

	/* socket is now either closed, pending open or open */
	if (STATE_IS_OPEN(cf_sk) && !STATE_IS_PENDING(cf_sk)) {
		/* Open */
		pr_debug("CAIF: %s(): Socket is already opened (cf_sk=%p)"
			 " check access f_flags = 0x%x file_mode = 0x%x\n",
			 __func__, cf_sk, mode, cf_sk->file_mode);

	} else {
		/* We are closed or pending open.
		 * If closed: send link setup
		 * If pending open: link setup already sent (we could have been
		 * interrupted by a signal last time)
		 */
		if (!STATE_IS_OPEN(cf_sk)) {
			/* First opening of file; connect lower layers: */
			/* Drain queue (very unlikely) */
			drain_queue(cf_sk);

			cf_sk->layer.receive = caif_sktrecv_cb;
			SET_STATE_OPEN(cf_sk);
			SET_PENDING_ON(cf_sk);

			/* Register this channel. */
			result =
			    caif_connect_client(&cf_sk->conn_req,
						&cf_sk->layer);
			if (result < 0) {
				pr_debug("CAIF: %s(): can't register channel\n",
					 __func__);
				/* Roll back the state bits set above. */
				ret = -EIO;
				SET_STATE_CLOSED(cf_sk);
				SET_PENDING_OFF(cf_sk);
				goto open_error;
			}
			dbfs_atomic_inc(&cnt.num_init);
		}

		/* If opened non-blocking, report "success".
		 */
		if (flags & O_NONBLOCK) {
			pr_debug("CAIF: %s(): O_NONBLOCK success\n",
				 __func__);
			ret = -EINPROGRESS;
			/* NOTE(review): sk_err conventionally holds a
			 * positive errno; storing -EINPROGRESS looks
			 * sign-flipped — confirm. */
			cf_sk->sk.sk_err = -EINPROGRESS;
			goto open_error;
		}

		trace_printk("CAIF: %s(): Wait for connect response\n",
			     __func__);

		/* release lock before waiting */
		release_sock(&cf_sk->sk);

		result =
		    wait_event_interruptible(*cf_sk->sk.sk_sleep,
					     !STATE_IS_PENDING(cf_sk));

		lock_sock(&(cf_sk->sk));

		if (result == -ERESTARTSYS) {
			pr_debug("CAIF: %s(): wait_event_interruptible"
				 "woken by a signal (2)", __func__);
			ret = -ERESTARTSYS;
			goto open_error;
		}

		if (!STATE_IS_OPEN(cf_sk)) {
			/* Lower layers said "no" */
			pr_debug("CAIF: %s(): Closed received\n", __func__);
			ret = -EPIPE;
			goto open_error;
		}

		trace_printk("CAIF: %s(): Connect received\n", __func__);
	}
	/* Open is ok */
	cf_sk->file_mode |= mode;

	trace_printk("CAIF: %s(): Connected - file mode = %x\n",
		     __func__, cf_sk->file_mode);

	release_sock(&cf_sk->sk);
	return 0;
open_error:
	sock->state = SS_UNCONNECTED;
	release_sock(&cf_sk->sk);
out:
	return ret;
}
1003
/*
 * Shut down both directions of a CAIF socket.  Only SHUT_RDWR is
 * supported.  Sends a link tear-down (disconnect) request to the lower
 * layers, drains the receive queue and resets flow/state bits.
 *
 * Returns 0 on success, -EOPNOTSUPP for any 'how' other than SHUT_RDWR,
 * -EBADF if the caifsock cannot be derived from the sock, -EIO if the
 * disconnect request fails, or -EAGAIN on a non-blocking socket while
 * the tear-down is still pending.
 */
static int caif_shutdown(struct socket *sock, int how)
{
	struct caifsock *cf_sk = NULL;
	int result = 0;
	/* NOTE(review): assigned below but never read — candidate for removal,
	 * assuming TX_FLOW_IS_ON() is side-effect free; confirm. */
	int tx_flow_state_was_on;
	struct sock *sk = sock->sk;

	trace_printk("CAIF: %s(): enter\n", __func__);
	pr_debug("f_flags=%x\n", sock->file->f_flags);

	if (how != SHUT_RDWR)
		return -EOPNOTSUPP;

	cf_sk = container_of(sk, struct caifsock, sk);
	if (cf_sk == NULL) {
		pr_debug("CAIF: %s(): COULD NOT FIND SOCKET\n", __func__);
		return -EBADF;
	}

	/* I want to be alone on cf_sk (except status queue) */
	lock_sock(&(cf_sk->sk));
	sock_hold(&cf_sk->sk);

	/* IS_CLOSED have double meaning:
	 * 1) Spontanous Remote Shutdown Request.
	 * 2) Ack on a channel teardown(disconnect)
	 * Must clear bit in case we previously received
	 * remote shudown request.
	 */
	if (STATE_IS_OPEN(cf_sk) && !STATE_IS_PENDING(cf_sk)) {
		SET_STATE_CLOSED(cf_sk);
		SET_PENDING_ON(cf_sk);
		tx_flow_state_was_on = TX_FLOW_IS_ON(cf_sk);
		SET_TX_FLOW_OFF(cf_sk);

		/* Hold the socket until DEINIT_RSP is received */
		sock_hold(&cf_sk->sk);
		result = caif_disconnect_client(&cf_sk->layer);

		if (result < 0) {
			pr_debug("CAIF: %s(): "
				 "caif_disconnect_client() failed\n",
				 __func__);
			/* Force a clean closed state before bailing out. */
			SET_STATE_CLOSED(cf_sk);
			SET_PENDING_OFF(cf_sk);
			SET_TX_FLOW_OFF(cf_sk);
			release_sock(&cf_sk->sk);
			sock_put(&cf_sk->sk);
			return -EIO;
		}

	}
	if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) {
		SET_PENDING_OFF(cf_sk);
		SET_REMOTE_SHUTDOWN_OFF(cf_sk);
	}

	/*
	 * Socket is no longer in state pending close,
	 * and we can release the reference.
	 */

	dbfs_atomic_inc(&cnt.num_close);
	drain_queue(cf_sk);
	SET_RX_FLOW_ON(cf_sk);
	cf_sk->file_mode = 0;
	/* NOTE(review): sock_put() before release_sock() relies on the
	 * sock_hold() taken above keeping the sock alive — confirm. */
	sock_put(&cf_sk->sk);
	release_sock(&cf_sk->sk);
	if (!result && (sock->file->f_flags & O_NONBLOCK)) {
		pr_debug("nonblocking shutdown returing -EAGAIN\n");
		return -EAGAIN;
	} else
		return result;
}
1078
1079static ssize_t caif_sock_no_sendpage(struct socket *sock,
1080 struct page *page,
1081 int offset, size_t size, int flags)
1082{
1083 return -EOPNOTSUPP;
1084}
1085
/*
 * This function is called as part of close.
 * Removes the per-socket debugfs directory, performs a full shutdown
 * (optionally waiting for the pending tear-down to complete), then
 * orphans the sock and drops the final user reference.  The remaining
 * cleanup runs in caif_sock_destructor() when the refcount hits zero.
 *
 * Returns 0 or a negative errno propagated from caif_shutdown() /
 * wait_event_interruptible().
 */
static int caif_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = NULL;
	int res;
	caif_assert(sk != NULL);
	cf_sk = container_of(sk, struct caifsock, sk);

	if (cf_sk->debugfs_socket_dir != NULL)
		debugfs_remove_recursive(cf_sk->debugfs_socket_dir);

	res = caif_shutdown(sock, SHUT_RDWR);
	if (res && res != -EINPROGRESS)
		return res;

	/*
	 * FIXME: Shutdown should probably be possible to do async
	 * without flushing queues, allowing reception of frames while
	 * waiting for DEINIT_IND.
	 * Release should always block, to allow secure decoupling of
	 * CAIF stack.
	 */
	if (!(sock->file->f_flags & O_NONBLOCK)) {
		/* Blocking close: wait until the tear-down completes. */
		res = wait_event_interruptible(*cf_sk->sk.sk_sleep,
					       !STATE_IS_PENDING(cf_sk));

		if (res == -ERESTARTSYS) {
			/* NOTE(review): -ERESTARTSYS is logged but still
			 * returned to the caller below — confirm intended. */
			pr_debug("CAIF: %s(): wait_event_interruptible"
				 "woken by a signal (1)", __func__);
		}
	}
	lock_sock(&(cf_sk->sk));

	sock->sk = NULL;

	/* Detach the socket from its process context by making it orphan. */
	sock_orphan(sk);

	/*
	 * Setting SHUTDOWN_MASK means that both send and receive are shutdown
	 * for the socket.
	 */
	sk->sk_shutdown = SHUTDOWN_MASK;

	/*
	 * Set the socket state to closed, the TCP_CLOSE macro is used when
	 * closing any socket.
	 */

	/* Flush out this sockets receive queue. */
	drain_queue(cf_sk);

	/* Finally release the socket. */
	SET_STATE_PENDING_DESTROY(cf_sk);

	release_sock(&cf_sk->sk);

	sock_put(sk);

	/*
	 * The rest of the cleanup will be handled from the
	 * caif_sock_destructor
	 */
	return res;
}
1152
/*
 * proto_ops table for PF_CAIF sockets.  Operations CAIF does not
 * support are routed to the generic sock_no_* stubs (or the local
 * caif_sock_no_sendpage).
 */
static const struct proto_ops caif_ops = {
	.family = PF_CAIF,
	.owner = THIS_MODULE,
	.release = caif_release,
	.bind = sock_no_bind,
	.connect = caif_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = caif_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = caif_shutdown,
	.setsockopt = setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = caif_sendmsg,
	.recvmsg = caif_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = caif_sock_no_sendpage,
};
1173
1174/* This function is called when a socket is finally destroyed. */
1175static void caif_sock_destructor(struct sock *sk)
1176{
1177 struct caifsock *cf_sk = NULL;
1178 cf_sk = container_of(sk, struct caifsock, sk);
1179 /* Error checks. */
1180 caif_assert(!atomic_read(&sk->sk_wmem_alloc));
1181 caif_assert(sk_unhashed(sk));
1182 caif_assert(!sk->sk_socket);
1183 if (!sock_flag(sk, SOCK_DEAD)) {
1184 pr_debug("CAIF: %s(): 0x%p", __func__, sk);
1185 return;
1186 }
1187
1188 if (STATE_IS_OPEN(cf_sk)) {
1189 pr_debug("CAIF: %s(): socket is opened (cf_sk=%p)"
1190 " file_mode = 0x%x\n", __func__,
1191 cf_sk, cf_sk->file_mode);
1192 return;
1193 }
1194 drain_queue(cf_sk);
1195 kfree(cf_sk->pktq);
1196
1197 trace_printk("CAIF: %s(): caif_sock_destructor: Removing socket %s\n",
1198 __func__, cf_sk->name);
1199 atomic_dec(&caif_nr_socks);
1200}
1201
/*
 * Create a new PF_CAIF socket.  Allocates the sock, initializes the
 * caifsock state machine to closed, sets default connect-request
 * options and (when available) creates per-socket debugfs entries.
 *
 * Returns 0 on success; -ESOCKTNOSUPPORT for unsupported socket types,
 * -EAFNOSUPPORT outside init_net, -EPROTONOSUPPORT for invalid
 * protocols, or -ENOMEM on allocation failure.
 */
static int caif_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	struct sock *sk = NULL;
	struct caifsock *cf_sk = NULL;
	int result = 0;
	static struct proto prot = {.name = "PF_CAIF",
		.owner = THIS_MODULE,
		.obj_size = sizeof(struct caifsock),
	};

	/*
	 * The sock->type specifies the socket type to use.
	 * in SEQPACKET mode packet boundaries are enforced.
	 */
	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
		return -ESOCKTNOSUPPORT;

	/* CAIF only supports the initial network namespace. */
	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (protocol < 0 || protocol >= CAIFPROTO_MAX)
		return -EPROTONOSUPPORT;
	/*
	 * Set the socket state to unconnected. The socket state is really
	 * not used at all in the net/core or socket.c but the
	 * initialization makes sure that sock->state is not uninitialized.
	 */
	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
	if (!sk)
		return -ENOMEM;

	cf_sk = container_of(sk, struct caifsock, sk);

	/* Store the protocol */
	sk->sk_protocol = (unsigned char) protocol;

	spin_lock_init(&cf_sk->read_queue_len_lock);

	/* Fill in some information concerning the misc socket. */
	snprintf(cf_sk->name, sizeof(cf_sk->name), "cf_sk%d",
		 atomic_read(&caif_nr_socks));

	/*
	 * Lock in order to try to stop someone from opening the socket
	 * too early.
	 */
	lock_sock(&(cf_sk->sk));

	/* Initialize the nozero default sock structure data. */
	sock_init_data(sock, sk);
	sock->ops = &caif_ops;
	sk->sk_destruct = caif_sock_destructor;
	sk->sk_sndbuf = caif_sockbuf_size;
	sk->sk_rcvbuf = caif_sockbuf_size;

	cf_sk->pktq = cfpktq_create();

	if (!cf_sk->pktq) {
		pr_err("CAIF: %s(): queue create failed.\n", __func__);
		result = -ENOMEM;
		release_sock(&cf_sk->sk);
		goto err_failed;
	}
	cf_sk->layer.ctrlcmd = caif_sktflowctrl_cb;
	/* Start in the fully-closed state with rx flow enabled. */
	SET_STATE_CLOSED(cf_sk);
	SET_PENDING_OFF(cf_sk);
	SET_TX_FLOW_OFF(cf_sk);
	SET_RX_FLOW_ON(cf_sk);

	/* Set default options on configuration */
	cf_sk->conn_req.priority = CAIF_PRIO_NORMAL;
	cf_sk->conn_req.link_selector = CAIF_LINK_HIGH_BANDW;
	cf_sk->conn_req.protocol = protocol;
	/* Increase the number of sockets created. */
	atomic_inc(&caif_nr_socks);
	/* NOTE(review): IS_ERR() does not catch a NULL debugfsdir (debugfs
	 * disabled or create failure returning NULL) — confirm behavior. */
	if (!IS_ERR(debugfsdir)) {
		cf_sk->debugfs_socket_dir =
			debugfs_create_dir(cf_sk->name, debugfsdir);
		debugfs_create_u32("conn_state", S_IRUSR | S_IWUSR,
				   cf_sk->debugfs_socket_dir, &cf_sk->conn_state);
		debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR,
				   cf_sk->debugfs_socket_dir, &cf_sk->flow_state);
		debugfs_create_u32("read_queue_len", S_IRUSR | S_IWUSR,
				   cf_sk->debugfs_socket_dir,
				   (u32 *) &cf_sk->read_queue_len);
		debugfs_create_u32("identity", S_IRUSR | S_IWUSR,
				   cf_sk->debugfs_socket_dir,
				   (u32 *) &cf_sk->layer.id);
	}
	release_sock(&cf_sk->sk);
	return 0;
err_failed:
	sk_free(sk);
	return result;
}
1300
/* Socket-family descriptor registered with sock_register() below. */
static struct net_proto_family caif_family_ops = {
	.family = PF_CAIF,
	.create = caif_create,
	.owner = THIS_MODULE,
};
1306
1307static int af_caif_init(void)
1308{
1309 int err;
1310 err = sock_register(&caif_family_ops);
1311
1312 if (!err)
1313 return err;
1314
1315 return 0;
1316}
1317
1318static int __init caif_sktinit_module(void)
1319{
1320 int stat;
1321#ifdef CONFIG_DEBUG_FS
1322 debugfsdir = debugfs_create_dir("chnl_skt", NULL);
1323 if (!IS_ERR(debugfsdir)) {
1324 debugfs_create_u32("skb_inuse", S_IRUSR | S_IWUSR,
1325 debugfsdir,
1326 (u32 *) &cnt.skb_in_use);
1327 debugfs_create_u32("skb_alloc", S_IRUSR | S_IWUSR,
1328 debugfsdir,
1329 (u32 *) &cnt.skb_alloc);
1330 debugfs_create_u32("skb_free", S_IRUSR | S_IWUSR,
1331 debugfsdir,
1332 (u32 *) &cnt.skb_free);
1333 debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR,
1334 debugfsdir,
1335 (u32 *) &caif_nr_socks);
1336 debugfs_create_u32("num_open", S_IRUSR | S_IWUSR,
1337 debugfsdir,
1338 (u32 *) &cnt.num_open);
1339 debugfs_create_u32("num_close", S_IRUSR | S_IWUSR,
1340 debugfsdir,
1341 (u32 *) &cnt.num_close);
1342 debugfs_create_u32("num_init", S_IRUSR | S_IWUSR,
1343 debugfsdir,
1344 (u32 *) &cnt.num_init);
1345 debugfs_create_u32("num_init_resp", S_IRUSR | S_IWUSR,
1346 debugfsdir,
1347 (u32 *) &cnt.num_init_resp);
1348 debugfs_create_u32("num_init_fail_resp", S_IRUSR | S_IWUSR,
1349 debugfsdir,
1350 (u32 *) &cnt.num_init_fail_resp);
1351 debugfs_create_u32("num_deinit", S_IRUSR | S_IWUSR,
1352 debugfsdir,
1353 (u32 *) &cnt.num_deinit);
1354 debugfs_create_u32("num_deinit_resp", S_IRUSR | S_IWUSR,
1355 debugfsdir,
1356 (u32 *) &cnt.num_deinit_resp);
1357 debugfs_create_u32("num_remote_shutdown_ind",
1358 S_IRUSR | S_IWUSR, debugfsdir,
1359 (u32 *) &cnt.num_remote_shutdown_ind);
1360 debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR,
1361 debugfsdir,
1362 (u32 *) &cnt.num_tx_flow_off_ind);
1363 debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR,
1364 debugfsdir,
1365 (u32 *) &cnt.num_tx_flow_on_ind);
1366 debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR,
1367 debugfsdir,
1368 (u32 *) &cnt.num_rx_flow_off);
1369 debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR,
1370 debugfsdir,
1371 (u32 *) &cnt.num_rx_flow_on);
1372 }
1373#endif
1374 stat = af_caif_init();
1375 if (stat) {
1376 pr_err("CAIF: %s(): Failed to initialize CAIF socket layer.",
1377 __func__);
1378 return stat;
1379 }
1380 return 0;
1381}
1382
1383static void __exit caif_sktexit_module(void)
1384{
1385 sock_unregister(PF_CAIF);
1386 if (debugfsdir != NULL)
1387 debugfs_remove_recursive(debugfsdir);
1388}
1389
1390module_init(caif_sktinit_module);
1391module_exit(caif_sktexit_module);
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
new file mode 100644
index 000000000000..c873e3d4387c
--- /dev/null
+++ b/net/caif/cfcnfg.c
@@ -0,0 +1,530 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6#include <linux/kernel.h>
7#include <linux/stddef.h>
8#include <linux/slab.h>
9#include <net/caif/caif_layer.h>
10#include <net/caif/cfpkt.h>
11#include <net/caif/cfcnfg.h>
12#include <net/caif/cfctrl.h>
13#include <net/caif/cfmuxl.h>
14#include <net/caif/cffrml.h>
15#include <net/caif/cfserl.h>
16#include <net/caif/cfsrvl.h>
17
18#include <linux/module.h>
19#include <asm/atomic.h>
20
21#define MAX_PHY_LAYERS 7
22#define PHY_NAME_LEN 20
23
24#define container_obj(layr) container_of(layr, struct cfcnfg, layer)
25
26/* Information about CAIF physical interfaces held by Config Module in order
27 * to manage physical interfaces
28 */
/* Information about CAIF physical interfaces held by Config Module in order
 * to manage physical interfaces
 */
struct cfcnfg_phyinfo {
	/* Pointer to the layer below the MUX (framing layer) */
	struct cflayer *frm_layer;
	/* Pointer to the lowest actual physical layer */
	struct cflayer *phy_layer;
	/* Unique identifier of the physical interface */
	unsigned int id;
	/* Preference of the physical interface (used when selecting a PHY) */
	enum cfcnfg_phy_preference pref;

	/* Reference count, number of channels using the device */
	int phy_ref_count;

	/* Information about the physical device */
	struct dev_info dev_info;
};
45
/* Top-level CAIF configuration object: owns the control and MUX layers
 * and the table of registered physical interfaces. */
struct cfcnfg {
	struct cflayer layer;	/* this module viewed as a CAIF layer */
	struct cflayer *ctrl;	/* CAIF control channel layer */
	struct cflayer *mux;	/* multiplexing layer */
	u8 last_phyid;		/* hint for the next free phy id (1-based) */
	struct cfcnfg_phyinfo phy_layers[MAX_PHY_LAYERS];
};
53
54static void cncfg_linkup_rsp(struct cflayer *layer, u8 linkid,
55 enum cfctrl_srv serv, u8 phyid,
56 struct cflayer *adapt_layer);
57static void cncfg_linkdestroy_rsp(struct cflayer *layer, u8 linkid,
58 struct cflayer *client_layer);
59static void cncfg_reject_rsp(struct cflayer *layer, u8 linkid,
60 struct cflayer *adapt_layer);
61static void cfctrl_resp_func(void);
62static void cfctrl_enum_resp(void);
63
64struct cfcnfg *cfcnfg_create(void)
65{
66 struct cfcnfg *this;
67 struct cfctrl_rsp *resp;
68 /* Initiate this layer */
69 this = kmalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
70 if (!this) {
71 pr_warning("CAIF: %s(): Out of memory\n", __func__);
72 return NULL;
73 }
74 memset(this, 0, sizeof(struct cfcnfg));
75 this->mux = cfmuxl_create();
76 if (!this->mux)
77 goto out_of_mem;
78 this->ctrl = cfctrl_create();
79 if (!this->ctrl)
80 goto out_of_mem;
81 /* Initiate response functions */
82 resp = cfctrl_get_respfuncs(this->ctrl);
83 resp->enum_rsp = cfctrl_enum_resp;
84 resp->linkerror_ind = cfctrl_resp_func;
85 resp->linkdestroy_rsp = cncfg_linkdestroy_rsp;
86 resp->sleep_rsp = cfctrl_resp_func;
87 resp->wake_rsp = cfctrl_resp_func;
88 resp->restart_rsp = cfctrl_resp_func;
89 resp->radioset_rsp = cfctrl_resp_func;
90 resp->linksetup_rsp = cncfg_linkup_rsp;
91 resp->reject_rsp = cncfg_reject_rsp;
92
93 this->last_phyid = 1;
94
95 cfmuxl_set_uplayer(this->mux, this->ctrl, 0);
96 layer_set_dn(this->ctrl, this->mux);
97 layer_set_up(this->ctrl, this);
98 return this;
99out_of_mem:
100 pr_warning("CAIF: %s(): Out of memory\n", __func__);
101 kfree(this->mux);
102 kfree(this->ctrl);
103 kfree(this);
104 return NULL;
105}
106EXPORT_SYMBOL(cfcnfg_create);
107
108void cfcnfg_remove(struct cfcnfg *cfg)
109{
110 if (cfg) {
111 kfree(cfg->mux);
112 kfree(cfg->ctrl);
113 kfree(cfg);
114 }
115}
116
/* No-op handler for control responses that require no action. */
static void cfctrl_resp_func(void)
{
}
120
/* Enumerate response handler: no action taken. */
static void cfctrl_enum_resp(void)
{
}
124
125struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
126 enum cfcnfg_phy_preference phy_pref)
127{
128 u16 i;
129
130 /* Try to match with specified preference */
131 for (i = 1; i < MAX_PHY_LAYERS; i++) {
132 if (cnfg->phy_layers[i].id == i &&
133 cnfg->phy_layers[i].pref == phy_pref &&
134 cnfg->phy_layers[i].frm_layer != NULL) {
135 caif_assert(cnfg->phy_layers != NULL);
136 caif_assert(cnfg->phy_layers[i].id == i);
137 return &cnfg->phy_layers[i].dev_info;
138 }
139 }
140 /* Otherwise just return something */
141 for (i = 1; i < MAX_PHY_LAYERS; i++) {
142 if (cnfg->phy_layers[i].id == i) {
143 caif_assert(cnfg->phy_layers != NULL);
144 caif_assert(cnfg->phy_layers[i].id == i);
145 return &cnfg->phy_layers[i].dev_info;
146 }
147 }
148
149 return NULL;
150}
151
152static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg,
153 u8 phyid)
154{
155 int i;
156 /* Try to match with specified preference */
157 for (i = 0; i < MAX_PHY_LAYERS; i++)
158 if (cnfg->phy_layers[i].frm_layer != NULL &&
159 cnfg->phy_layers[i].id == phyid)
160 return &cnfg->phy_layers[i];
161 return NULL;
162}
163
164int cfcnfg_get_named(struct cfcnfg *cnfg, char *name)
165{
166 int i;
167
168 /* Try to match with specified name */
169 for (i = 0; i < MAX_PHY_LAYERS; i++) {
170 if (cnfg->phy_layers[i].frm_layer != NULL
171 && strcmp(cnfg->phy_layers[i].phy_layer->name,
172 name) == 0)
173 return cnfg->phy_layers[i].frm_layer->id;
174 }
175 return 0;
176}
177
178/*
179 * NOTE: What happens on destroy failure:
180 * 1a) No response - Too early
181 * This will not happen because enumerate has already
182 * completed.
183 * 1b) No response - FATAL
184 * Not handled, but this should be a CAIF PROTOCOL ERROR
185 * Modem error, response is really expected - this
186 * case is not really handled.
187 * 2) O/E-bit indicate error
188 * Ignored - this link is destroyed anyway.
189 * 3) Not able to match on request
190 * Not handled, but this should be a CAIF PROTOCOL ERROR
191 * 4) Link-Error - (no response)
192 * Not handled, but this should be a CAIF PROTOCOL ERROR
193 */
194
195int cfcnfg_del_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
196{
197 u8 channel_id = 0;
198 int ret = 0;
199 struct cfcnfg_phyinfo *phyinfo = NULL;
200 u8 phyid = 0;
201
202 caif_assert(adap_layer != NULL);
203 channel_id = adap_layer->id;
204 if (channel_id == 0) {
205 pr_err("CAIF: %s():adap_layer->id is 0\n", __func__);
206 ret = -ENOTCONN;
207 goto end;
208 }
209
210 if (adap_layer->dn == NULL) {
211 pr_err("CAIF: %s():adap_layer->dn is NULL\n", __func__);
212 ret = -ENODEV;
213 goto end;
214 }
215
216 if (adap_layer->dn != NULL)
217 phyid = cfsrvl_getphyid(adap_layer->dn);
218
219 phyinfo = cfcnfg_get_phyinfo(cnfg, phyid);
220 if (phyinfo == NULL) {
221 pr_warning("CAIF: %s(): No interface to send disconnect to\n",
222 __func__);
223 ret = -ENODEV;
224 goto end;
225 }
226
227 if (phyinfo->id != phyid
228 || phyinfo->phy_layer->id != phyid
229 || phyinfo->frm_layer->id != phyid) {
230
231 pr_err("CAIF: %s(): Inconsistency in phy registration\n",
232 __func__);
233 ret = -EINVAL;
234 goto end;
235 }
236
237 ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
238
239end:
240 if (phyinfo != NULL && --phyinfo->phy_ref_count == 0 &&
241 phyinfo->phy_layer != NULL &&
242 phyinfo->phy_layer->modemcmd != NULL) {
243 phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
244 _CAIF_MODEMCMD_PHYIF_USELESS);
245 }
246 return ret;
247
248}
249EXPORT_SYMBOL(cfcnfg_del_adapt_layer);
250
/*
 * Link-destroy response from the modem: detach the service layer from
 * the MUX, notify the client with DEINIT_RSP, then free the service
 * layer.  The three steps below are order-sensitive.
 */
static void cncfg_linkdestroy_rsp(struct cflayer *layer, u8 linkid,
				  struct cflayer *client_layer)
{
	struct cfcnfg *cnfg = container_obj(layer);
	struct cflayer *servl;

	/*
	 * 1) Remove service from the MUX layer. The MUX must
	 *    guarantee that no more payload sent "upwards" (receive)
	 */
	servl = cfmuxl_remove_uplayer(cnfg->mux, linkid);

	if (servl == NULL) {
		pr_err("CAIF: %s(): PROTOCOL ERROR "
		       "- Error removing service_layer Linkid(%d)",
		       __func__, linkid);
		return;
	}
	caif_assert(linkid == servl->id);

	/* The removed layer must belong to the client we were asked about. */
	if (servl != client_layer && servl->up != client_layer) {
		pr_err("CAIF: %s(): Error removing service_layer "
		       "Linkid(%d) %p %p",
		       __func__, linkid, (void *) servl,
		       (void *) client_layer);
		return;
	}

	/*
	 * 2) DEINIT_RSP must guarantee that no more packets are transmitted
	 *    from client (adap_layer) when it returns.
	 */

	if (servl->ctrlcmd == NULL) {
		pr_err("CAIF: %s(): Error servl->ctrlcmd == NULL", __func__);
		return;
	}

	servl->ctrlcmd(servl, CAIF_CTRLCMD_DEINIT_RSP, 0);

	/* 3) It is now safe to destroy the service layer. */
	cfservl_destroy(servl);
}
294
295/*
296 * NOTE: What happens on linksetup failure:
297 * 1a) No response - Too early
298 * This will not happen because enumerate is secured
299 * before using interface.
300 * 1b) No response - FATAL
301 * Not handled, but this should be a CAIF PROTOCOL ERROR
302 * Modem error, response is really expected - this case is
303 * not really handled.
304 * 2) O/E-bit indicate error
305 * Handled in cnfg_reject_rsp
306 * 3) Not able to match on request
307 * Not handled, but this should be a CAIF PROTOCOL ERROR
308 * 4) Link-Error - (no response)
309 * Not handled, but this should be a CAIF PROTOCOL ERROR
310 */
311
/*
 * Request setup of a new CAIF channel for the given client (adaptation)
 * layer over the physical interface selected by param->phyid.
 * Validates the client callbacks and the PHY's framing layer, then
 * issues enumerate and link-up requests on the control channel.
 *
 * Returns 0 when the requests were issued, -EINVAL for an invalid
 * client layer, or -ENODEV when the PHY does not exist.  The actual
 * link outcome arrives asynchronously via cncfg_linkup_rsp()/
 * cncfg_reject_rsp().
 */
int
cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
			    struct cfctrl_link_param *param,
			    struct cflayer *adap_layer)
{
	struct cflayer *frml;
	if (adap_layer == NULL) {
		pr_err("CAIF: %s(): adap_layer is zero", __func__);
		return -EINVAL;
	}
	if (adap_layer->receive == NULL) {
		pr_err("CAIF: %s(): adap_layer->receive is NULL", __func__);
		return -EINVAL;
	}
	if (adap_layer->ctrlcmd == NULL) {
		pr_err("CAIF: %s(): adap_layer->ctrlcmd == NULL", __func__);
		return -EINVAL;
	}
	frml = cnfg->phy_layers[param->phyid].frm_layer;
	if (frml == NULL) {
		pr_err("CAIF: %s(): Specified PHY type does not exist!",
		       __func__);
		return -ENODEV;
	}
	caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id);
	caif_assert(cnfg->phy_layers[param->phyid].frm_layer->id ==
		    param->phyid);
	caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id ==
		    param->phyid);
	/* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
	cfctrl_enum_req(cnfg->ctrl, param->phyid);
	cfctrl_linkup_request(cnfg->ctrl, param, adap_layer);
	return 0;
}
346EXPORT_SYMBOL(cfcnfg_add_adaptation_layer);
347
348static void cncfg_reject_rsp(struct cflayer *layer, u8 linkid,
349 struct cflayer *adapt_layer)
350{
351 if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
352 adapt_layer->ctrlcmd(adapt_layer,
353 CAIF_CTRLCMD_INIT_FAIL_RSP, 0);
354}
355
356static void
357cncfg_linkup_rsp(struct cflayer *layer, u8 linkid, enum cfctrl_srv serv,
358 u8 phyid, struct cflayer *adapt_layer)
359{
360 struct cfcnfg *cnfg = container_obj(layer);
361 struct cflayer *servicel = NULL;
362 struct cfcnfg_phyinfo *phyinfo;
363 if (adapt_layer == NULL) {
364 pr_err("CAIF: %s(): PROTOCOL ERROR "
365 "- LinkUp Request/Response did not match\n", __func__);
366 return;
367 }
368
369 caif_assert(cnfg != NULL);
370 caif_assert(phyid != 0);
371 phyinfo = &cnfg->phy_layers[phyid];
372 caif_assert(phyinfo != NULL);
373 caif_assert(phyinfo->id == phyid);
374 caif_assert(phyinfo->phy_layer != NULL);
375 caif_assert(phyinfo->phy_layer->id == phyid);
376
377 if (phyinfo != NULL &&
378 phyinfo->phy_ref_count++ == 0 &&
379 phyinfo->phy_layer != NULL &&
380 phyinfo->phy_layer->modemcmd != NULL) {
381 caif_assert(phyinfo->phy_layer->id == phyid);
382 phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
383 _CAIF_MODEMCMD_PHYIF_USEFULL);
384
385 }
386 adapt_layer->id = linkid;
387
388 switch (serv) {
389 case CFCTRL_SRV_VEI:
390 servicel = cfvei_create(linkid, &phyinfo->dev_info);
391 break;
392 case CFCTRL_SRV_DATAGRAM:
393 servicel = cfdgml_create(linkid, &phyinfo->dev_info);
394 break;
395 case CFCTRL_SRV_RFM:
396 servicel = cfrfml_create(linkid, &phyinfo->dev_info);
397 break;
398 case CFCTRL_SRV_UTIL:
399 servicel = cfutill_create(linkid, &phyinfo->dev_info);
400 break;
401 case CFCTRL_SRV_VIDEO:
402 servicel = cfvidl_create(linkid, &phyinfo->dev_info);
403 break;
404 case CFCTRL_SRV_DBG:
405 servicel = cfdbgl_create(linkid, &phyinfo->dev_info);
406 break;
407 default:
408 pr_err("CAIF: %s(): Protocol error. "
409 "Link setup response - unknown channel type\n",
410 __func__);
411 return;
412 }
413 if (!servicel) {
414 pr_warning("CAIF: %s(): Out of memory\n", __func__);
415 return;
416 }
417 layer_set_dn(servicel, cnfg->mux);
418 cfmuxl_set_uplayer(cnfg->mux, servicel, linkid);
419 layer_set_up(servicel, adapt_layer);
420 layer_set_dn(adapt_layer, servicel);
421 servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
422}
423
424void
425cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
426 void *dev, struct cflayer *phy_layer, u16 *phyid,
427 enum cfcnfg_phy_preference pref,
428 bool fcs, bool stx)
429{
430 struct cflayer *frml;
431 struct cflayer *phy_driver = NULL;
432 int i;
433
434
435 if (cnfg->phy_layers[cnfg->last_phyid].frm_layer == NULL) {
436 *phyid = cnfg->last_phyid;
437
438 /* range: * 1..(MAX_PHY_LAYERS-1) */
439 cnfg->last_phyid =
440 (cnfg->last_phyid % (MAX_PHY_LAYERS - 1)) + 1;
441 } else {
442 *phyid = 0;
443 for (i = 1; i < MAX_PHY_LAYERS; i++) {
444 if (cnfg->phy_layers[i].frm_layer == NULL) {
445 *phyid = i;
446 break;
447 }
448 }
449 }
450 if (*phyid == 0) {
451 pr_err("CAIF: %s(): No Available PHY ID\n", __func__);
452 return;
453 }
454
455 switch (phy_type) {
456 case CFPHYTYPE_FRAG:
457 phy_driver =
458 cfserl_create(CFPHYTYPE_FRAG, *phyid, stx);
459 if (!phy_driver) {
460 pr_warning("CAIF: %s(): Out of memory\n", __func__);
461 return;
462 }
463
464 break;
465 case CFPHYTYPE_CAIF:
466 phy_driver = NULL;
467 break;
468 default:
469 pr_err("CAIF: %s(): %d", __func__, phy_type);
470 return;
471 break;
472 }
473
474 phy_layer->id = *phyid;
475 cnfg->phy_layers[*phyid].pref = pref;
476 cnfg->phy_layers[*phyid].id = *phyid;
477 cnfg->phy_layers[*phyid].dev_info.id = *phyid;
478 cnfg->phy_layers[*phyid].dev_info.dev = dev;
479 cnfg->phy_layers[*phyid].phy_layer = phy_layer;
480 cnfg->phy_layers[*phyid].phy_ref_count = 0;
481 phy_layer->type = phy_type;
482 frml = cffrml_create(*phyid, fcs);
483 if (!frml) {
484 pr_warning("CAIF: %s(): Out of memory\n", __func__);
485 return;
486 }
487 cnfg->phy_layers[*phyid].frm_layer = frml;
488 cfmuxl_set_dnlayer(cnfg->mux, frml, *phyid);
489 layer_set_up(frml, cnfg->mux);
490
491 if (phy_driver != NULL) {
492 phy_driver->id = *phyid;
493 layer_set_dn(frml, phy_driver);
494 layer_set_up(phy_driver, frml);
495 layer_set_dn(phy_driver, phy_layer);
496 layer_set_up(phy_layer, phy_driver);
497 } else {
498 layer_set_dn(frml, phy_layer);
499 layer_set_up(phy_layer, frml);
500 }
501}
502EXPORT_SYMBOL(cfcnfg_add_phy_layer);
503
/*
 * Unregister a physical interface: clear its bookkeeping slot, remove
 * and free the framing layer from the MUX, and free the optional helper
 * driver layer sitting between the framing and physical layers.
 * Always returns 0.
 */
int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer)
{
	struct cflayer *frml, *frml_dn;
	u16 phyid;
	phyid = phy_layer->id;
	caif_assert(phyid == cnfg->phy_layers[phyid].id);
	caif_assert(phy_layer == cnfg->phy_layers[phyid].phy_layer);
	caif_assert(phy_layer->id == phyid);
	caif_assert(cnfg->phy_layers[phyid].frm_layer->id == phyid);

	/* Zero the slot first so the id is seen as free by allocators. */
	memset(&cnfg->phy_layers[phy_layer->id], 0,
	       sizeof(struct cfcnfg_phyinfo));
	frml = cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
	frml_dn = frml->dn;
	cffrml_set_uplayer(frml, NULL);
	cffrml_set_dnlayer(frml, NULL);
	kfree(frml);

	/* If a helper driver (e.g. cfserl fragmentation layer) was inserted
	 * below the framing layer, unlink and free it as well. */
	if (phy_layer != frml_dn) {
		layer_set_up(frml_dn, NULL);
		layer_set_dn(frml_dn, NULL);
		kfree(frml_dn);
	}
	layer_set_up(phy_layer, NULL);
	return 0;
}
530EXPORT_SYMBOL(cfcnfg_del_phy_layer);
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
new file mode 100644
index 000000000000..11f80140f3cb
--- /dev/null
+++ b/net/caif/cfctrl.c
@@ -0,0 +1,664 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/stddef.h>
8#include <linux/spinlock.h>
9#include <linux/slab.h>
10#include <net/caif/caif_layer.h>
11#include <net/caif/cfpkt.h>
12#include <net/caif/cfctrl.h>
13
14#define container_obj(layr) container_of(layr, struct cfctrl, serv.layer)
15#define UTILITY_NAME_LENGTH 16
16#define CFPKT_CTRL_PKT_LEN 20
17
18
#ifdef CAIF_NO_LOOP
/* Loopback disabled at build time: always report failure so frames
 * without the response bit are treated as protocol errors. */
static int handle_loop(struct cfctrl *ctrl,
			int cmd, struct cfpkt *pkt){
	return CAIF_FAILURE;
}
#else
/* Loopback emulation of the modem side; defined at the end of this file. */
static int handle_loop(struct cfctrl *ctrl,
		       int cmd, struct cfpkt *pkt);
#endif
28static int cfctrl_recv(struct cflayer *layr, struct cfpkt *pkt);
29static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
30 int phyid);
31
32
33struct cflayer *cfctrl_create(void)
34{
35 struct cfctrl *this =
36 kmalloc(sizeof(struct cfctrl), GFP_ATOMIC);
37 if (!this) {
38 pr_warning("CAIF: %s(): Out of memory\n", __func__);
39 return NULL;
40 }
41 caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
42 memset(this, 0, sizeof(*this));
43 spin_lock_init(&this->info_list_lock);
44 atomic_set(&this->req_seq_no, 1);
45 atomic_set(&this->rsp_seq_no, 1);
46 this->serv.dev_info.id = 0xff;
47 this->serv.layer.id = 0;
48 this->serv.layer.receive = cfctrl_recv;
49 sprintf(this->serv.layer.name, "ctrl");
50 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
51 spin_lock_init(&this->loop_linkid_lock);
52 this->loop_linkid = 1;
53 return &this->serv.layer;
54}
55
56static bool param_eq(struct cfctrl_link_param *p1, struct cfctrl_link_param *p2)
57{
58 bool eq =
59 p1->linktype == p2->linktype &&
60 p1->priority == p2->priority &&
61 p1->phyid == p2->phyid &&
62 p1->endpoint == p2->endpoint && p1->chtype == p2->chtype;
63
64 if (!eq)
65 return false;
66
67 switch (p1->linktype) {
68 case CFCTRL_SRV_VEI:
69 return true;
70 case CFCTRL_SRV_DATAGRAM:
71 return p1->u.datagram.connid == p2->u.datagram.connid;
72 case CFCTRL_SRV_RFM:
73 return
74 p1->u.rfm.connid == p2->u.rfm.connid &&
75 strcmp(p1->u.rfm.volume, p2->u.rfm.volume) == 0;
76 case CFCTRL_SRV_UTIL:
77 return
78 p1->u.utility.fifosize_kb == p2->u.utility.fifosize_kb
79 && p1->u.utility.fifosize_bufs ==
80 p2->u.utility.fifosize_bufs
81 && strcmp(p1->u.utility.name, p2->u.utility.name) == 0
82 && p1->u.utility.paramlen == p2->u.utility.paramlen
83 && memcmp(p1->u.utility.params, p2->u.utility.params,
84 p1->u.utility.paramlen) == 0;
85
86 case CFCTRL_SRV_VIDEO:
87 return p1->u.video.connid == p2->u.video.connid;
88 case CFCTRL_SRV_DBG:
89 return true;
90 case CFCTRL_SRV_DECM:
91 return false;
92 default:
93 return false;
94 }
95 return false;
96}
97
98bool cfctrl_req_eq(struct cfctrl_request_info *r1,
99 struct cfctrl_request_info *r2)
100{
101 if (r1->cmd != r2->cmd)
102 return false;
103 if (r1->cmd == CFCTRL_CMD_LINK_SETUP)
104 return param_eq(&r1->param, &r2->param);
105 else
106 return r1->channel_id == r2->channel_id;
107}
108
109/* Insert request at the end */
110void cfctrl_insert_req(struct cfctrl *ctrl,
111 struct cfctrl_request_info *req)
112{
113 struct cfctrl_request_info *p;
114 spin_lock(&ctrl->info_list_lock);
115 req->next = NULL;
116 atomic_inc(&ctrl->req_seq_no);
117 req->sequence_no = atomic_read(&ctrl->req_seq_no);
118 if (ctrl->first_req == NULL) {
119 ctrl->first_req = req;
120 spin_unlock(&ctrl->info_list_lock);
121 return;
122 }
123 p = ctrl->first_req;
124 while (p->next != NULL)
125 p = p->next;
126 p->next = req;
127 spin_unlock(&ctrl->info_list_lock);
128}
129
130static void cfctrl_insert_req2(struct cfctrl *ctrl, enum cfctrl_cmd cmd,
131 u8 linkid, struct cflayer *user_layer)
132{
133 struct cfctrl_request_info *req = kmalloc(sizeof(*req), GFP_KERNEL);
134 if (!req) {
135 pr_warning("CAIF: %s(): Out of memory\n", __func__);
136 return;
137 }
138 req->client_layer = user_layer;
139 req->cmd = cmd;
140 req->channel_id = linkid;
141 cfctrl_insert_req(ctrl, req);
142}
143
/* Compare and remove request */
/*
 * Find the pending request matching an incoming response (per
 * cfctrl_req_eq), unlink it from the list and return it; the caller owns
 * and must free the returned entry.  Returns NULL when nothing matches.
 * Responses are expected in FIFO order; an out-of-order match is logged.
 */
struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
					      struct cfctrl_request_info *req)
{
	struct cfctrl_request_info *p;
	struct cfctrl_request_info *ret;

	spin_lock(&ctrl->info_list_lock);
	if (ctrl->first_req == NULL) {
		spin_unlock(&ctrl->info_list_lock);
		return NULL;
	}

	/* Fast path: the response matches the oldest pending request. */
	if (cfctrl_req_eq(req, ctrl->first_req)) {
		ret = ctrl->first_req;
		caif_assert(ctrl->first_req);
		atomic_set(&ctrl->rsp_seq_no,
				 ctrl->first_req->sequence_no);
		ctrl->first_req = ctrl->first_req->next;
		spin_unlock(&ctrl->info_list_lock);
		return ret;
	}

	p = ctrl->first_req;

	/* Out-of-order response: scan the rest of the list. */
	while (p->next != NULL) {
		if (cfctrl_req_eq(req, p->next)) {
			pr_warning("CAIF: %s(): Requests are not "
					"received in order\n",
					__func__);
			ret = p->next;
			atomic_set(&ctrl->rsp_seq_no,
					p->next->sequence_no);
			p->next = p->next->next;
			spin_unlock(&ctrl->info_list_lock);
			return ret;
		}
		p = p->next;
	}
	spin_unlock(&ctrl->info_list_lock);

	pr_warning("CAIF: %s(): Request does not match\n",
		   __func__);
	return NULL;
}
189
190struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer)
191{
192 struct cfctrl *this = container_obj(layer);
193 return &this->res;
194}
195
/* Hook the layer below the control channel (towards the phy). */
void cfctrl_set_dnlayer(struct cflayer *this, struct cflayer *dn)
{
	this->dn = dn;
}
200
/* Hook the layer above the control channel (towards the clients). */
void cfctrl_set_uplayer(struct cflayer *this, struct cflayer *up)
{
	this->up = up;
}
205
/* Fill in routing info so the mux sends the packet on the control channel. */
static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl)
{
	info->hdr_len = 0;
	info->channel_id = cfctrl->serv.layer.id;
	info->dev_info = &cfctrl->serv.dev_info;
}
212
213void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
214{
215 struct cfctrl *cfctrl = container_obj(layer);
216 int ret;
217 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
218 if (!pkt) {
219 pr_warning("CAIF: %s(): Out of memory\n", __func__);
220 return;
221 }
222 caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
223 init_info(cfpkt_info(pkt), cfctrl);
224 cfpkt_info(pkt)->dev_info->id = physlinkid;
225 cfctrl->serv.dev_info.id = physlinkid;
226 cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
227 cfpkt_addbdy(pkt, physlinkid);
228 ret =
229 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
230 if (ret < 0) {
231 pr_err("CAIF: %s(): Could not transmit enum message\n",
232 __func__);
233 cfpkt_destroy(pkt);
234 }
235}
236
237void cfctrl_linkup_request(struct cflayer *layer,
238 struct cfctrl_link_param *param,
239 struct cflayer *user_layer)
240{
241 struct cfctrl *cfctrl = container_obj(layer);
242 u32 tmp32;
243 u16 tmp16;
244 u8 tmp8;
245 struct cfctrl_request_info *req;
246 int ret;
247 char utility_name[16];
248 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
249 if (!pkt) {
250 pr_warning("CAIF: %s(): Out of memory\n", __func__);
251 return;
252 }
253 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
254 cfpkt_addbdy(pkt, (param->chtype << 4) + param->linktype);
255 cfpkt_addbdy(pkt, (param->priority << 3) + param->phyid);
256 cfpkt_addbdy(pkt, param->endpoint & 0x03);
257
258 switch (param->linktype) {
259 case CFCTRL_SRV_VEI:
260 break;
261 case CFCTRL_SRV_VIDEO:
262 cfpkt_addbdy(pkt, (u8) param->u.video.connid);
263 break;
264 case CFCTRL_SRV_DBG:
265 break;
266 case CFCTRL_SRV_DATAGRAM:
267 tmp32 = cpu_to_le32(param->u.datagram.connid);
268 cfpkt_add_body(pkt, &tmp32, 4);
269 break;
270 case CFCTRL_SRV_RFM:
271 /* Construct a frame, convert DatagramConnectionID to network
272 * format long and copy it out...
273 */
274 tmp32 = cpu_to_le32(param->u.rfm.connid);
275 cfpkt_add_body(pkt, &tmp32, 4);
276 /* Add volume name, including zero termination... */
277 cfpkt_add_body(pkt, param->u.rfm.volume,
278 strlen(param->u.rfm.volume) + 1);
279 break;
280 case CFCTRL_SRV_UTIL:
281 tmp16 = cpu_to_le16(param->u.utility.fifosize_kb);
282 cfpkt_add_body(pkt, &tmp16, 2);
283 tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs);
284 cfpkt_add_body(pkt, &tmp16, 2);
285 memset(utility_name, 0, sizeof(utility_name));
286 strncpy(utility_name, param->u.utility.name,
287 UTILITY_NAME_LENGTH - 1);
288 cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH);
289 tmp8 = param->u.utility.paramlen;
290 cfpkt_add_body(pkt, &tmp8, 1);
291 cfpkt_add_body(pkt, param->u.utility.params,
292 param->u.utility.paramlen);
293 break;
294 default:
295 pr_warning("CAIF: %s():Request setup of bad link type = %d\n",
296 __func__, param->linktype);
297 }
298 req = kmalloc(sizeof(*req), GFP_KERNEL);
299 if (!req) {
300 pr_warning("CAIF: %s(): Out of memory\n", __func__);
301 return;
302 }
303 memset(req, 0, sizeof(*req));
304 req->client_layer = user_layer;
305 req->cmd = CFCTRL_CMD_LINK_SETUP;
306 req->param = *param;
307 cfctrl_insert_req(cfctrl, req);
308 init_info(cfpkt_info(pkt), cfctrl);
309 cfpkt_info(pkt)->dev_info->id = param->phyid;
310 ret =
311 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
312 if (ret < 0) {
313 pr_err("CAIF: %s(): Could not transmit linksetup request\n",
314 __func__);
315 cfpkt_destroy(pkt);
316 }
317}
318
319int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
320 struct cflayer *client)
321{
322 int ret;
323 struct cfctrl *cfctrl = container_obj(layer);
324 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
325 if (!pkt) {
326 pr_warning("CAIF: %s(): Out of memory\n", __func__);
327 return -ENOMEM;
328 }
329 cfctrl_insert_req2(cfctrl, CFCTRL_CMD_LINK_DESTROY, channelid, client);
330 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
331 cfpkt_addbdy(pkt, channelid);
332 init_info(cfpkt_info(pkt), cfctrl);
333 ret =
334 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
335 if (ret < 0) {
336 pr_err("CAIF: %s(): Could not transmit link-down request\n",
337 __func__);
338 cfpkt_destroy(pkt);
339 }
340 return ret;
341}
342
343void cfctrl_sleep_req(struct cflayer *layer)
344{
345 int ret;
346 struct cfctrl *cfctrl = container_obj(layer);
347 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
348 if (!pkt) {
349 pr_warning("CAIF: %s(): Out of memory\n", __func__);
350 return;
351 }
352 cfpkt_addbdy(pkt, CFCTRL_CMD_SLEEP);
353 init_info(cfpkt_info(pkt), cfctrl);
354 ret =
355 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
356 if (ret < 0)
357 cfpkt_destroy(pkt);
358}
359
360void cfctrl_wake_req(struct cflayer *layer)
361{
362 int ret;
363 struct cfctrl *cfctrl = container_obj(layer);
364 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
365 if (!pkt) {
366 pr_warning("CAIF: %s(): Out of memory\n", __func__);
367 return;
368 }
369 cfpkt_addbdy(pkt, CFCTRL_CMD_WAKE);
370 init_info(cfpkt_info(pkt), cfctrl);
371 ret =
372 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
373 if (ret < 0)
374 cfpkt_destroy(pkt);
375}
376
377void cfctrl_getstartreason_req(struct cflayer *layer)
378{
379 int ret;
380 struct cfctrl *cfctrl = container_obj(layer);
381 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
382 if (!pkt) {
383 pr_warning("CAIF: %s(): Out of memory\n", __func__);
384 return;
385 }
386 cfpkt_addbdy(pkt, CFCTRL_CMD_START_REASON);
387 init_info(cfpkt_info(pkt), cfctrl);
388 ret =
389 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
390 if (ret < 0)
391 cfpkt_destroy(pkt);
392}
393
394
/*
 * Receive handler for the CAIF control channel.  Parses an incoming
 * control frame, matches responses against the pending-request list and
 * invokes the registered response callbacks.  The packet is consumed
 * (destroyed) on every path.  Returns 0 on success, -1 on parse error.
 */
static int cfctrl_recv(struct cflayer *layr, struct cfpkt *pkt)
{
	u8 cmdrsp;
	u8 cmd;
	int ret = -1;
	u16 tmp16;
	u8 len;
	u8 param[255];
	u8 linkid;
	struct cfctrl *cfctrl = container_obj(layr);
	struct cfctrl_request_info rsp, *req;


	cfpkt_extr_head(pkt, &cmdrsp, 1);
	cmd = cmdrsp & CFCTRL_CMD_MASK;
	/* Frames without the response bit set are handed to the loopback
	 * emulator (except link-error indications); without loopback
	 * support this is a protocol error. */
	if (cmd != CFCTRL_CMD_LINK_ERR
	    && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) {
		if (handle_loop(cfctrl, cmd, pkt) == CAIF_FAILURE) {
			pr_info("CAIF: %s() CAIF Protocol error:"
				"Response bit not set\n", __func__);
			goto error;
		}
	}

	switch (cmd) {
	case CFCTRL_CMD_LINK_SETUP:
		{
			enum cfctrl_srv serv;
			enum cfctrl_srv servtype;
			u8 endpoint;
			u8 physlinkid;
			u8 prio;
			u8 tmp;
			u32 tmp32;
			u8 *cp;
			int i;
			struct cfctrl_link_param linkparam;
			memset(&linkparam, 0, sizeof(linkparam));

			/* First byte: service type (low nibble) and
			 * channel type (high nibble). */
			cfpkt_extr_head(pkt, &tmp, 1);

			serv = tmp & CFCTRL_SRV_MASK;
			linkparam.linktype = serv;

			servtype = tmp >> 4;
			linkparam.chtype = servtype;

			/* Second byte: phy id (3 bits) and priority. */
			cfpkt_extr_head(pkt, &tmp, 1);
			physlinkid = tmp & 0x07;
			prio = tmp >> 3;

			linkparam.priority = prio;
			linkparam.phyid = physlinkid;
			cfpkt_extr_head(pkt, &endpoint, 1);
			linkparam.endpoint = endpoint & 0x03;

			/* Service-specific trailer, ending in the
			 * assigned link id. */
			switch (serv) {
			case CFCTRL_SRV_VEI:
			case CFCTRL_SRV_DBG:
				/* Link ID */
				cfpkt_extr_head(pkt, &linkid, 1);
				break;
			case CFCTRL_SRV_VIDEO:
				cfpkt_extr_head(pkt, &tmp, 1);
				linkparam.u.video.connid = tmp;
				/* Link ID */
				cfpkt_extr_head(pkt, &linkid, 1);
				break;

			case CFCTRL_SRV_DATAGRAM:
				cfpkt_extr_head(pkt, &tmp32, 4);
				linkparam.u.datagram.connid =
				    le32_to_cpu(tmp32);
				/* Link ID */
				cfpkt_extr_head(pkt, &linkid, 1);
				break;
			case CFCTRL_SRV_RFM:
				/* Construct a frame, convert
				 * DatagramConnectionID
				 * to network format long and copy it out...
				 */
				cfpkt_extr_head(pkt, &tmp32, 4);
				linkparam.u.rfm.connid =
				    le32_to_cpu(tmp32);
				/* NUL-terminated volume name follows. */
				cp = (u8 *) linkparam.u.rfm.volume;
				for (cfpkt_extr_head(pkt, &tmp, 1);
				     cfpkt_more(pkt) && tmp != '\0';
				     cfpkt_extr_head(pkt, &tmp, 1))
					*cp++ = tmp;
				*cp = '\0';

				/* Link ID */
				cfpkt_extr_head(pkt, &linkid, 1);

				break;
			case CFCTRL_SRV_UTIL:
				/* Construct a frame, convert
				 * DatagramConnectionID
				 * to network format long and copy it out...
				 */
				/* Fifosize KB */
				cfpkt_extr_head(pkt, &tmp16, 2);
				linkparam.u.utility.fifosize_kb =
				    le16_to_cpu(tmp16);
				/* Fifosize bufs */
				cfpkt_extr_head(pkt, &tmp16, 2);
				linkparam.u.utility.fifosize_bufs =
				    le16_to_cpu(tmp16);
				/* name */
				cp = (u8 *) linkparam.u.utility.name;
				caif_assert(sizeof(linkparam.u.utility.name)
					     >= UTILITY_NAME_LENGTH);
				for (i = 0;
				     i < UTILITY_NAME_LENGTH
				     && cfpkt_more(pkt); i++) {
					cfpkt_extr_head(pkt, &tmp, 1);
					*cp++ = tmp;
				}
				/* Length */
				cfpkt_extr_head(pkt, &len, 1);
				linkparam.u.utility.paramlen = len;
				/* Param Data */
				cp = linkparam.u.utility.params;
				while (cfpkt_more(pkt) && len--) {
					cfpkt_extr_head(pkt, &tmp, 1);
					*cp++ = tmp;
				}
				/* Link ID */
				cfpkt_extr_head(pkt, &linkid, 1);
				/* Length */
				cfpkt_extr_head(pkt, &len, 1);
				/* Param Data */
				cfpkt_extr_head(pkt, &param, len);
				break;
			default:
				pr_warning("CAIF: %s(): Request setup "
					   "- invalid link type (%d)",
					   __func__, serv);
				goto error;
			}

			/* Match the response against the pending request
			 * and report the result to the client layer. */
			rsp.cmd = cmd;
			rsp.param = linkparam;
			req = cfctrl_remove_req(cfctrl, &rsp);

			if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
				cfpkt_erroneous(pkt)) {
				pr_err("CAIF: %s(): Invalid O/E bit or parse "
				       "error on CAIF control channel",
					__func__);
				cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
						       0,
						       req ? req->client_layer
						       : NULL);
			} else {
				cfctrl->res.linksetup_rsp(cfctrl->serv.
							  layer.up, linkid,
							  serv, physlinkid,
							  req ? req->
							  client_layer : NULL);
			}

			if (req != NULL)
				kfree(req);
		}
		break;
	case CFCTRL_CMD_LINK_DESTROY:
		cfpkt_extr_head(pkt, &linkid, 1);
		rsp.cmd = cmd;
		rsp.channel_id = linkid;
		req = cfctrl_remove_req(cfctrl, &rsp);
		cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid,
					    req ? req->client_layer : NULL);
		if (req != NULL)
			kfree(req);
		break;
	case CFCTRL_CMD_LINK_ERR:
		pr_err("CAIF: %s(): Frame Error Indication received\n",
			__func__);
		cfctrl->res.linkerror_ind();
		break;
	case CFCTRL_CMD_ENUM:
		cfctrl->res.enum_rsp();
		break;
	case CFCTRL_CMD_SLEEP:
		cfctrl->res.sleep_rsp();
		break;
	case CFCTRL_CMD_WAKE:
		cfctrl->res.wake_rsp();
		break;
	case CFCTRL_CMD_LINK_RECONF:
		cfctrl->res.restart_rsp();
		break;
	case CFCTRL_CMD_RADIO_SET:
		cfctrl->res.radioset_rsp();
		break;
	default:
		pr_err("CAIF: %s(): Unrecognized Control Frame\n", __func__);
		goto error;
		break;
	}
	ret = 0;
error:
	cfpkt_destroy(pkt);
	return ret;
}
601
602static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
603 int phyid)
604{
605 struct cfctrl *this = container_obj(layr);
606 switch (ctrl) {
607 case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
608 case CAIF_CTRLCMD_FLOW_OFF_IND:
609 spin_lock(&this->info_list_lock);
610 if (this->first_req != NULL) {
611 pr_warning("CAIF: %s(): Received flow off in "
612 "control layer", __func__);
613 }
614 spin_unlock(&this->info_list_lock);
615 break;
616 default:
617 break;
618 }
619}
620
#ifndef CAIF_NO_LOOP
/*
 * Loopback emulation of the modem side of the control channel: rewrites an
 * outgoing request packet so that it parses as the corresponding response.
 * For link-setup, a free loopback link id is allocated and appended;
 * for link-destroy, the id is released.  Returns CAIF_SUCCESS, or -EINVAL
 * when no loopback link id is available.
 */
static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt)
{
	/* Remembers the last id handed out to spread allocations. */
	static int last_linkid;
	u8 linkid, linktype, tmp;
	switch (cmd) {
	case CFCTRL_CMD_LINK_SETUP:
		/* Search upwards from the last id, then downwards. */
		spin_lock(&ctrl->loop_linkid_lock);
		for (linkid = last_linkid + 1; linkid < 255; linkid++)
			if (!ctrl->loop_linkused[linkid])
				goto found;
		for (linkid = last_linkid - 1; linkid > 0; linkid--)
			if (!ctrl->loop_linkused[linkid])
				goto found;
		spin_unlock(&ctrl->loop_linkid_lock);
		return -EINVAL;
found:
		if (!ctrl->loop_linkused[linkid])
			ctrl->loop_linkused[linkid] = 1;

		last_linkid = linkid;

		/* Append the allocated link id, as a real response would. */
		cfpkt_add_trail(pkt, &linkid, 1);
		spin_unlock(&ctrl->loop_linkid_lock);
		cfpkt_peek_head(pkt, &linktype, 1);
		if (linktype == CFCTRL_SRV_UTIL) {
			/* Utility responses carry two extra bytes. */
			tmp = 0x01;
			cfpkt_add_trail(pkt, &tmp, 1);
			cfpkt_add_trail(pkt, &tmp, 1);
		}
		break;

	case CFCTRL_CMD_LINK_DESTROY:
		spin_lock(&ctrl->loop_linkid_lock);
		cfpkt_peek_head(pkt, &linkid, 1);
		ctrl->loop_linkused[linkid] = 0;
		spin_unlock(&ctrl->loop_linkid_lock);
		break;
	default:
		break;
	}
	return CAIF_SUCCESS;
}
#endif
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
new file mode 100644
index 000000000000..ab6b6dc34cf8
--- /dev/null
+++ b/net/caif/cfdbgl.c
@@ -0,0 +1,40 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/stddef.h>
8#include <linux/slab.h>
9#include <net/caif/caif_layer.h>
10#include <net/caif/cfsrvl.h>
11#include <net/caif/cfpkt.h>
12
13static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt);
14static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt);
15
16struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info)
17{
18 struct cfsrvl *dbg = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
19 if (!dbg) {
20 pr_warning("CAIF: %s(): Out of memory\n", __func__);
21 return NULL;
22 }
23 caif_assert(offsetof(struct cfsrvl, layer) == 0);
24 memset(dbg, 0, sizeof(struct cfsrvl));
25 cfsrvl_init(dbg, channel_id, dev_info);
26 dbg->layer.receive = cfdbgl_receive;
27 dbg->layer.transmit = cfdbgl_transmit;
28 snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ - 1, "dbg%d", channel_id);
29 return &dbg->layer;
30}
31
32static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt)
33{
34 return layr->up->receive(layr->up, pkt);
35}
36
37static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt)
38{
39 return layr->dn->transmit(layr->dn, pkt);
40}
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
new file mode 100644
index 000000000000..53194840ecb6
--- /dev/null
+++ b/net/caif/cfdgml.c
@@ -0,0 +1,108 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/stddef.h>
8#include <linux/spinlock.h>
9#include <linux/slab.h>
10#include <net/caif/caif_layer.h>
11#include <net/caif/cfsrvl.h>
12#include <net/caif/cfpkt.h>
13
14#define container_obj(layr) ((struct cfsrvl *) layr)
15
16#define DGM_CMD_BIT 0x80
17#define DGM_FLOW_OFF 0x81
18#define DGM_FLOW_ON 0x80
19#define DGM_CTRL_PKT_SIZE 1
20
21static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt);
22static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt);
23
24struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info)
25{
26 struct cfsrvl *dgm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
27 if (!dgm) {
28 pr_warning("CAIF: %s(): Out of memory\n", __func__);
29 return NULL;
30 }
31 caif_assert(offsetof(struct cfsrvl, layer) == 0);
32 memset(dgm, 0, sizeof(struct cfsrvl));
33 cfsrvl_init(dgm, channel_id, dev_info);
34 dgm->layer.receive = cfdgml_receive;
35 dgm->layer.transmit = cfdgml_transmit;
36 snprintf(dgm->layer.name, CAIF_LAYER_NAME_SZ - 1, "dgm%d", channel_id);
37 dgm->layer.name[CAIF_LAYER_NAME_SZ - 1] = '\0';
38 return &dgm->layer;
39}
40
/*
 * Receive handler for the datagram service.  Data frames (command bit
 * clear) have a 1+3 byte header stripped and are passed upwards; command
 * frames carry in-band flow control.  The packet is consumed on all
 * non-data paths.  Returns 0 or a negative errno.
 */
static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	u8 cmd = -1;
	u8 dgmhdr[3];
	int ret;
	caif_assert(layr->up != NULL);
	caif_assert(layr->receive != NULL);
	caif_assert(layr->ctrlcmd != NULL);

	if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
		cfpkt_destroy(pkt);
		return -EPROTO;
	}

	/* Command bit clear: plain datagram with a 3-byte header. */
	if ((cmd & DGM_CMD_BIT) == 0) {
		if (cfpkt_extr_head(pkt, &dgmhdr, 3) < 0) {
			pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
			cfpkt_destroy(pkt);
			return -EPROTO;
		}
		ret = layr->up->receive(layr->up, pkt);
		return ret;
	}

	/* In-band flow-control commands. */
	switch (cmd) {
	case DGM_FLOW_OFF:	/* FLOW OFF */
		layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
		cfpkt_destroy(pkt);
		return 0;
	case DGM_FLOW_ON:	/* FLOW ON */
		layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
		cfpkt_destroy(pkt);
		return 0;
	default:
		cfpkt_destroy(pkt);
		pr_info("CAIF: %s(): Unknown datagram control %d (0x%x)\n",
			__func__, cmd, cmd);
		return -EPROTO;
	}
}
82
83static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
84{
85 u32 zero = 0;
86 struct caif_payload_info *info;
87 struct cfsrvl *service = container_obj(layr);
88 int ret;
89 if (!cfsrvl_ready(service, &ret))
90 return ret;
91
92 cfpkt_add_head(pkt, &zero, 4);
93
94 /* Add info for MUX-layer to route the packet out. */
95 info = cfpkt_info(pkt);
96 info->channel_id = service->layer.id;
97 /* To optimize alignment, we add up the size of CAIF header
98 * before payload.
99 */
100 info->hdr_len = 4;
101 info->dev_info = &service->dev_info;
102 ret = layr->dn->transmit(layr->dn, pkt);
103 if (ret < 0) {
104 u32 tmp32;
105 cfpkt_extr_head(pkt, &tmp32, 4);
106 }
107 return ret;
108}
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
new file mode 100644
index 000000000000..e86a4ca3b217
--- /dev/null
+++ b/net/caif/cffrml.c
@@ -0,0 +1,151 @@
1/*
2 * CAIF Framing Layer.
3 *
4 * Copyright (C) ST-Ericsson AB 2010
5 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
6 * License terms: GNU General Public License (GPL) version 2
7 */
8
9#include <linux/stddef.h>
10#include <linux/spinlock.h>
11#include <linux/slab.h>
12#include <linux/crc-ccitt.h>
13#include <net/caif/caif_layer.h>
14#include <net/caif/cfpkt.h>
15#include <net/caif/cffrml.h>
16
17#define container_obj(layr) container_of(layr, struct cffrml, layer)
18
19struct cffrml {
20 struct cflayer layer;
21 bool dofcs; /* !< FCS active */
22};
23
24static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt);
25static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt);
26static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
27 int phyid);
28
29static u32 cffrml_rcv_error;
30static u32 cffrml_rcv_checsum_error;
31struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
32{
33 struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC);
34 if (!this) {
35 pr_warning("CAIF: %s(): Out of memory\n", __func__);
36 return NULL;
37 }
38 caif_assert(offsetof(struct cffrml, layer) == 0);
39
40 memset(this, 0, sizeof(struct cflayer));
41 this->layer.receive = cffrml_receive;
42 this->layer.transmit = cffrml_transmit;
43 this->layer.ctrlcmd = cffrml_ctrlcmd;
44 snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "frm%d", phyid);
45 this->dofcs = use_fcs;
46 this->layer.id = phyid;
47 return (struct cflayer *) this;
48}
49
/* Hook the layer above the framing layer (the mux). */
void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up)
{
	this->up = up;
}
54
/* Hook the layer below the framing layer (the phy or phy driver). */
void cffrml_set_dnlayer(struct cflayer *this, struct cflayer *dn)
{
	this->dn = dn;
}
59
/* CRC-CCITT accumulator, used as the iterator callback for cfpkt_iterate. */
static u16 cffrml_checksum(u16 chks, void *buf, u16 len)
{
	/* FIXME: FCS should be moved to glue in order to use OS-Specific
	 * solutions
	 */
	return crc_ccitt(chks, buf, len);
}
67
/*
 * Receive handler for the framing layer: strips the little-endian 16-bit
 * length field, trims the packet to that length, optionally verifies the
 * trailing CRC-CCITT frame checksum, then passes the frame upwards.
 * Returns a negative errno on framing/checksum errors.
 */
static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	u16 tmp;
	u16 len;
	u16 hdrchks;
	u16 pktchks;
	struct cffrml *this;
	this = container_obj(layr);

	cfpkt_extr_head(pkt, &tmp, 2);
	len = le16_to_cpu(tmp);

	/* Subtract for FCS on length if FCS is not used. */
	if (!this->dofcs)
		len -= 2;

	if (cfpkt_setlen(pkt, len) < 0) {
		++cffrml_rcv_error;
		pr_err("CAIF: %s():Framing length error (%d)\n", __func__, len);
		cfpkt_destroy(pkt);
		return -EPROTO;
	}
	/*
	 * Don't do extract if FCS is false, rather do setlen - then we don't
	 * get a cache-miss.
	 */
	if (this->dofcs) {
		cfpkt_extr_trail(pkt, &tmp, 2);
		hdrchks = le16_to_cpu(tmp);
		pktchks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
		if (pktchks != hdrchks) {
			/* The checksum bytes are put back before bailing.
			 * NOTE(review): this path returns without
			 * cfpkt_destroy(pkt) - confirm the caller frees
			 * the packet on -EILSEQ, otherwise it leaks. */
			cfpkt_add_trail(pkt, &tmp, 2);
			++cffrml_rcv_error;
			++cffrml_rcv_checsum_error;
			pr_info("CAIF: %s(): Frame checksum error "
				"(0x%x != 0x%x)\n", __func__, hdrchks, pktchks);
			return -EILSEQ;
		}
	}
	if (cfpkt_erroneous(pkt)) {
		++cffrml_rcv_error;
		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
		cfpkt_destroy(pkt);
		return -EPROTO;
	}
	return layr->up->receive(layr->up, pkt);
}
115
116static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
117{
118 int tmp;
119 u16 chks;
120 u16 len;
121 int ret;
122 struct cffrml *this = container_obj(layr);
123 if (this->dofcs) {
124 chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
125 tmp = cpu_to_le16(chks);
126 cfpkt_add_trail(pkt, &tmp, 2);
127 } else {
128 cfpkt_pad_trail(pkt, 2);
129 }
130 len = cfpkt_getlen(pkt);
131 tmp = cpu_to_le16(len);
132 cfpkt_add_head(pkt, &tmp, 2);
133 cfpkt_info(pkt)->hdr_len += 2;
134 if (cfpkt_erroneous(pkt)) {
135 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
136 return -EPROTO;
137 }
138 ret = layr->dn->transmit(layr->dn, pkt);
139 if (ret < 0) {
140 /* Remove header on faulty packet. */
141 cfpkt_extr_head(pkt, &tmp, 2);
142 }
143 return ret;
144}
145
146static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
147 int phyid)
148{
149 if (layr->up->ctrlcmd)
150 layr->up->ctrlcmd(layr->up, ctrl, layr->id);
151}
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
new file mode 100644
index 000000000000..6fb9f9e96cf8
--- /dev/null
+++ b/net/caif/cfmuxl.c
@@ -0,0 +1,246 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6#include <linux/stddef.h>
7#include <linux/spinlock.h>
8#include <linux/slab.h>
9#include <net/caif/cfpkt.h>
10#include <net/caif/cfmuxl.h>
11#include <net/caif/cfsrvl.h>
12#include <net/caif/cffrml.h>
13
14#define container_obj(layr) container_of(layr, struct cfmuxl, layer)
15
16#define CAIF_CTRL_CHANNEL 0
17#define UP_CACHE_SIZE 8
18#define DN_CACHE_SIZE 8
19
20struct cfmuxl {
21 struct cflayer layer;
22 struct list_head srvl_list;
23 struct list_head frml_list;
24 struct cflayer *up_cache[UP_CACHE_SIZE];
25 struct cflayer *dn_cache[DN_CACHE_SIZE];
26 /*
27 * Set when inserting or removing downwards layers.
28 */
29 spinlock_t transmit_lock;
30
31 /*
32 * Set when inserting or removing upwards layers.
33 */
34 spinlock_t receive_lock;
35
36};
37
38static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt);
39static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt);
40static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
41 int phyid);
42static struct cflayer *get_up(struct cfmuxl *muxl, u16 id);
43
44struct cflayer *cfmuxl_create(void)
45{
46 struct cfmuxl *this = kmalloc(sizeof(struct cfmuxl), GFP_ATOMIC);
47 if (!this)
48 return NULL;
49 memset(this, 0, sizeof(*this));
50 this->layer.receive = cfmuxl_receive;
51 this->layer.transmit = cfmuxl_transmit;
52 this->layer.ctrlcmd = cfmuxl_ctrlcmd;
53 INIT_LIST_HEAD(&this->srvl_list);
54 INIT_LIST_HEAD(&this->frml_list);
55 spin_lock_init(&this->transmit_lock);
56 spin_lock_init(&this->receive_lock);
57 snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "mux");
58 return &this->layer;
59}
60
/* Register a service layer (keyed by its channel/link id) above the mux. */
int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
{
	struct cfmuxl *muxl = container_obj(layr);
	spin_lock(&muxl->receive_lock);
	list_add(&up->node, &muxl->srvl_list);
	spin_unlock(&muxl->receive_lock);
	return 0;
}
69
70bool cfmuxl_is_phy_inuse(struct cflayer *layr, u8 phyid)
71{
72 struct list_head *node;
73 struct cflayer *layer;
74 struct cfmuxl *muxl = container_obj(layr);
75 bool match = false;
76 spin_lock(&muxl->receive_lock);
77
78 list_for_each(node, &muxl->srvl_list) {
79 layer = list_entry(node, struct cflayer, node);
80 if (cfsrvl_phyid_match(layer, phyid)) {
81 match = true;
82 break;
83 }
84
85 }
86 spin_unlock(&muxl->receive_lock);
87 return match;
88}
89
90u8 cfmuxl_get_phyid(struct cflayer *layr, u8 channel_id)
91{
92 struct cflayer *up;
93 int phyid;
94 struct cfmuxl *muxl = container_obj(layr);
95 spin_lock(&muxl->receive_lock);
96 up = get_up(muxl, channel_id);
97 if (up != NULL)
98 phyid = cfsrvl_getphyid(up);
99 else
100 phyid = 0;
101 spin_unlock(&muxl->receive_lock);
102 return phyid;
103}
104
105int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
106{
107 struct cfmuxl *muxl = (struct cfmuxl *) layr;
108 spin_lock(&muxl->transmit_lock);
109 list_add(&dn->node, &muxl->frml_list);
110 spin_unlock(&muxl->transmit_lock);
111 return 0;
112}
113
114static struct cflayer *get_from_id(struct list_head *list, u16 id)
115{
116 struct list_head *node;
117 struct cflayer *layer;
118 list_for_each(node, list) {
119 layer = list_entry(node, struct cflayer, node);
120 if (layer->id == id)
121 return layer;
122 }
123 return NULL;
124}
125
126struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
127{
128 struct cfmuxl *muxl = container_obj(layr);
129 struct cflayer *dn;
130 spin_lock(&muxl->transmit_lock);
131 memset(muxl->dn_cache, 0, sizeof(muxl->dn_cache));
132 dn = get_from_id(&muxl->frml_list, phyid);
133 if (dn == NULL) {
134 spin_unlock(&muxl->transmit_lock);
135 return NULL;
136 }
137 list_del(&dn->node);
138 caif_assert(dn != NULL);
139 spin_unlock(&muxl->transmit_lock);
140 return dn;
141}
142
/* Invariant: lock is taken */
/* Look up the service layer for a channel id, with a small direct-mapped
 * cache in front of the list scan. */
static struct cflayer *get_up(struct cfmuxl *muxl, u16 id)
{
	struct cflayer *up;
	int idx = id % UP_CACHE_SIZE;
	up = muxl->up_cache[idx];
	if (up == NULL || up->id != id) {
		/* Cache miss (or stale slot): scan the list and refill. */
		up = get_from_id(&muxl->srvl_list, id);
		muxl->up_cache[idx] = up;
	}
	return up;
}
155
/* Invariant: lock is taken */
/* Look up the framing layer for a destination device id, with a small
 * direct-mapped cache in front of the list scan. */
static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info)
{
	struct cflayer *dn;
	int idx = dev_info->id % DN_CACHE_SIZE;
	dn = muxl->dn_cache[idx];
	if (dn == NULL || dn->id != dev_info->id) {
		/* Cache miss (or stale slot): scan the list and refill. */
		dn = get_from_id(&muxl->frml_list, dev_info->id);
		muxl->dn_cache[idx] = dn;
	}
	return dn;
}
168
169struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
170{
171 struct cflayer *up;
172 struct cfmuxl *muxl = container_obj(layr);
173 spin_lock(&muxl->receive_lock);
174 up = get_up(muxl, id);
175 memset(muxl->up_cache, 0, sizeof(muxl->up_cache));
176 list_del(&up->node);
177 spin_unlock(&muxl->receive_lock);
178 return up;
179}
180
/*
 * Receive handler for the mux: strips the 1-byte link id and dispatches
 * the packet to the service layer registered for that id.  Packets for
 * unknown ids are dropped (deliberately without error - see comment).
 */
static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	int ret;
	struct cfmuxl *muxl = container_obj(layr);
	u8 id;
	struct cflayer *up;
	if (cfpkt_extr_head(pkt, &id, 1) < 0) {
		pr_err("CAIF: %s(): erroneous Caif Packet\n", __func__);
		cfpkt_destroy(pkt);
		return -EPROTO;
	}

	spin_lock(&muxl->receive_lock);
	up = get_up(muxl, id);
	spin_unlock(&muxl->receive_lock);
	if (up == NULL) {
		pr_info("CAIF: %s():Received data on unknown link ID = %d "
			"(0x%x)	 up == NULL", __func__, id, id);
		cfpkt_destroy(pkt);
		/*
		 * Don't return ERROR, since modem misbehaves and sends out
		 * flow on before linksetup response.
		 */
		return /* CFGLU_EPROT; */ 0;
	}

	/* NOTE(review): 'up' is used after the lock is dropped; safe only
	 * if removal never races with receive - confirm against callers. */
	ret = up->receive(up, pkt);
	return ret;
}
210
211static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
212{
213 int ret;
214 struct cfmuxl *muxl = container_obj(layr);
215 u8 linkid;
216 struct cflayer *dn;
217 struct caif_payload_info *info = cfpkt_info(pkt);
218 dn = get_dn(muxl, cfpkt_info(pkt)->dev_info);
219 if (dn == NULL) {
220 pr_warning("CAIF: %s(): Send data on unknown phy "
221 "ID = %d (0x%x)\n",
222 __func__, info->dev_info->id, info->dev_info->id);
223 return -ENOTCONN;
224 }
225 info->hdr_len += 1;
226 linkid = info->channel_id;
227 cfpkt_add_head(pkt, &linkid, 1);
228 ret = dn->transmit(dn, pkt);
229 /* Remove MUX protocol header upon error. */
230 if (ret < 0)
231 cfpkt_extr_head(pkt, &linkid, 1);
232 return ret;
233}
234
235static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
236 int phyid)
237{
238 struct cfmuxl *muxl = container_obj(layr);
239 struct list_head *node;
240 struct cflayer *layer;
241 list_for_each(node, &muxl->srvl_list) {
242 layer = list_entry(node, struct cflayer, node);
243 if (cfsrvl_phyid_match(layer, phyid))
244 layer->ctrlcmd(layer, ctrl, phyid);
245 }
246}
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
new file mode 100644
index 000000000000..83fff2ff6658
--- /dev/null
+++ b/net/caif/cfpkt_skbuff.c
@@ -0,0 +1,571 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/string.h>
8#include <linux/skbuff.h>
9#include <linux/hardirq.h>
10#include <net/caif/cfpkt.h>
11
/* Default headroom/tailroom reserved for newly created packets. */
#define PKT_PREFIX CAIF_NEEDED_HEADROOM
#define PKT_POSTFIX CAIF_NEEDED_TAILROOM
/* Minimum tail growth when a packet must be extended (amortizes
 * reallocations across repeated small appends; see cfpkt_add_body). */
#define PKT_LEN_WHEN_EXTENDING 128
/*
 * Mark a packet as erroneous and reset its tail to the head.
 * NOTE(review): resetting the tail discards any payload already present
 * in the linear area -- callers must treat the packet as dead afterwards.
 */
#define PKT_ERROR(pkt, errmsg) do {	   \
	cfpkt_priv(pkt)->erronous = true;  \
	skb_reset_tail_pointer(&pkt->skb); \
	pr_warning("CAIF: " errmsg);\
	} while (0)

/* FIFO of packets with an externally readable element counter. */
struct cfpktq {
	struct sk_buff_head head;
	atomic_t count;
	/* Lock protects count updates */
	spinlock_t lock;
};

/*
 * net/caif/ is generic and does not
 * understand SKB, so we do this typecast
 */
struct cfpkt {
	struct sk_buff skb;
};

/* Private data inside SKB */
struct cfpkt_priv_data {
	struct dev_info dev_info;	/* routing info for the phy device */
	bool erronous;			/* set once any operation failed */
};
41
/* Access the CAIF private area stored in the skb control buffer. */
inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt)
{
	return (struct cfpkt_priv_data *) pkt->skb.cb;
}

/* True if a previous operation flagged this packet as bad. */
inline bool is_erronous(struct cfpkt *pkt)
{
	return cfpkt_priv(pkt)->erronous;
}

/* cfpkt <-> sk_buff conversions: cfpkt wraps an sk_buff at offset 0,
 * so both directions are pure pointer reinterpretations. */
inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt)
{
	return &pkt->skb;
}

inline struct cfpkt *skb_to_pkt(struct sk_buff *skb)
{
	return (struct cfpkt *) skb;
}
61
62
/*
 * Wrap a native packet (an sk_buff) as a cfpkt and clear its error flag.
 * NOTE(review): the 'dir' parameter is currently unused.
 */
struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt)
{
	struct cfpkt *pkt = skb_to_pkt(nativepkt);
	cfpkt_priv(pkt)->erronous = false;
	return pkt;
}
EXPORT_SYMBOL(cfpkt_fromnative);
70
/* Return the native (sk_buff) representation of a packet. */
void *cfpkt_tonative(struct cfpkt *pkt)
{
	/* The skb is the first member, so this is the same address. */
	return pkt_to_skb(pkt);
}
EXPORT_SYMBOL(cfpkt_tonative);
76
77static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx)
78{
79 struct sk_buff *skb;
80
81 if (likely(in_interrupt()))
82 skb = alloc_skb(len + pfx, GFP_ATOMIC);
83 else
84 skb = alloc_skb(len + pfx, GFP_KERNEL);
85
86 if (unlikely(skb == NULL))
87 return NULL;
88
89 skb_reserve(skb, pfx);
90 return skb_to_pkt(skb);
91}
92
/* Allocate a packet with room for 'len' bytes of payload plus the
 * standard CAIF headroom and tailroom. */
inline struct cfpkt *cfpkt_create(u16 len)
{
	return cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
}
EXPORT_SYMBOL(cfpkt_create);
98
/* Release a packet and its underlying socket buffer. */
void cfpkt_destroy(struct cfpkt *pkt)
{
	kfree_skb(pkt_to_skb(pkt));
}
EXPORT_SYMBOL(cfpkt_destroy);
105
106inline bool cfpkt_more(struct cfpkt *pkt)
107{
108 struct sk_buff *skb = pkt_to_skb(pkt);
109 return skb->len > 0;
110}
111EXPORT_SYMBOL(cfpkt_more);
112
/*
 * Copy the first 'len' bytes of the packet without consuming them.
 * Fast path reads straight from the linear head; otherwise the bytes
 * are popped with cfpkt_extr_head() and pushed back with
 * cfpkt_add_head().
 * NOTE(review): the two paths use inverted return conventions -- the
 * fast path returns 0 on success while the pop/push path returns 1 on
 * success.  The visible caller (cfserl_receive) ignores the result,
 * but this should be unified.
 */
int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len)
{
	struct sk_buff *skb = pkt_to_skb(pkt);
	if (skb_headlen(skb) >= len) {
		memcpy(data, skb->data, len);
		return 0;
	}
	return !cfpkt_extr_head(pkt, data, len) &&
	       !cfpkt_add_head(pkt, data, len);
}
EXPORT_SYMBOL(cfpkt_peek_head);
124
125int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
126{
127 struct sk_buff *skb = pkt_to_skb(pkt);
128 u8 *from;
129 if (unlikely(is_erronous(pkt)))
130 return -EPROTO;
131
132 if (unlikely(len > skb->len)) {
133 PKT_ERROR(pkt, "cfpkt_extr_head read beyond end of packet\n");
134 return -EPROTO;
135 }
136
137 if (unlikely(len > skb_headlen(skb))) {
138 if (unlikely(skb_linearize(skb) != 0)) {
139 PKT_ERROR(pkt, "cfpkt_extr_head linearize failed\n");
140 return -EPROTO;
141 }
142 }
143 from = skb_pull(skb, len);
144 from -= len;
145 memcpy(data, from, len);
146 return 0;
147}
148EXPORT_SYMBOL(cfpkt_extr_head);
149
150int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
151{
152 struct sk_buff *skb = pkt_to_skb(pkt);
153 u8 *data = dta;
154 u8 *from;
155 if (unlikely(is_erronous(pkt)))
156 return -EPROTO;
157
158 if (unlikely(skb_linearize(skb) != 0)) {
159 PKT_ERROR(pkt, "cfpkt_extr_trail linearize failed\n");
160 return -EPROTO;
161 }
162 if (unlikely(skb->data + len > skb_tail_pointer(skb))) {
163 PKT_ERROR(pkt, "cfpkt_extr_trail read beyond end of packet\n");
164 return -EPROTO;
165 }
166 from = skb_tail_pointer(skb) - len;
167 skb_trim(skb, skb->len - len);
168 memcpy(data, from, len);
169 return 0;
170}
171EXPORT_SYMBOL(cfpkt_extr_trail);
172
/* Extend the packet tail by 'len' bytes (contents left unwritten --
 * see the NULL-data path in cfpkt_add_body). */
int cfpkt_pad_trail(struct cfpkt *pkt, u16 len)
{
	return cfpkt_add_body(pkt, NULL, len);
}
EXPORT_SYMBOL(cfpkt_pad_trail);
178
/*
 * Append 'len' bytes to the packet tail, growing or copying the
 * underlying skb as necessary.  'data' may be NULL, in which case the
 * tail is extended but left unwritten (used by cfpkt_pad_trail()).
 * Returns 0 on success, -EPROTO on failure.
 */
int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
{
	struct sk_buff *skb = pkt_to_skb(pkt);
	struct sk_buff *lastskb;
	u8 *to;
	u16 addlen = 0;


	if (unlikely(is_erronous(pkt)))
		return -EPROTO;

	lastskb = skb;

	/* Check whether we need to add space at the tail */
	if (unlikely(skb_tailroom(skb) < len)) {
		/* Grow by at least PKT_LEN_WHEN_EXTENDING to amortize
		 * reallocations across repeated small appends. */
		if (likely(len < PKT_LEN_WHEN_EXTENDING))
			addlen = PKT_LEN_WHEN_EXTENDING;
		else
			addlen = len;
	}

	/* Check whether we need to change the SKB before writing to the tail */
	if (unlikely((addlen > 0) || skb_cloned(skb) || skb_shared(skb))) {

		/* Make sure data is writable */
		if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) {
			PKT_ERROR(pkt, "cfpkt_add_body: cow failed\n");
			return -EPROTO;
		}
		/*
		 * Is the SKB non-linear after skb_cow_data()? If so, we are
		 * going to add data to the last SKB, so we need to adjust
		 * lengths of the top SKB.
		 */
		if (lastskb != skb) {
			pr_warning("CAIF: %s(): Packet is non-linear\n",
				   __func__);
			skb->len += len;
			skb->data_len += len;
		}
	}

	/* All set to put the last SKB and optionally write data there. */
	to = skb_put(lastskb, len);
	if (likely(data))
		memcpy(to, data, len);
	return 0;
}
EXPORT_SYMBOL(cfpkt_add_body);
228
/* Append a single byte to the packet body. */
inline int cfpkt_addbdy(struct cfpkt *pkt, u8 data)
{
	return cfpkt_add_body(pkt, &data, 1);
}
EXPORT_SYMBOL(cfpkt_addbdy);
234
235int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
236{
237 struct sk_buff *skb = pkt_to_skb(pkt);
238 struct sk_buff *lastskb;
239 u8 *to;
240 const u8 *data = data2;
241 if (unlikely(is_erronous(pkt)))
242 return -EPROTO;
243 if (unlikely(skb_headroom(skb) < len)) {
244 PKT_ERROR(pkt, "cfpkt_add_head: no headroom\n");
245 return -EPROTO;
246 }
247
248 /* Make sure data is writable */
249 if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
250 PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n");
251 return -EPROTO;
252 }
253
254 to = skb_push(skb, len);
255 memcpy(to, data, len);
256 return 0;
257}
258EXPORT_SYMBOL(cfpkt_add_head);
259
/* Append 'len' bytes at the packet tail (alias of cfpkt_add_body). */
inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len)
{
	return cfpkt_add_body(pkt, data, len);
}
EXPORT_SYMBOL(cfpkt_add_trail);
265
266inline u16 cfpkt_getlen(struct cfpkt *pkt)
267{
268 struct sk_buff *skb = pkt_to_skb(pkt);
269 return skb->len;
270}
271EXPORT_SYMBOL(cfpkt_getlen);
272
/*
 * Run 'iter_func' over the (linearized) packet payload, seeded with
 * 'data' -- used e.g. for checksumming.
 * NOTE(review): the error paths return -EPROTO through a u16 return
 * type, which callers will see as a large positive value; verify no
 * caller compares the result against a negative errno.
 */
inline u16 cfpkt_iterate(struct cfpkt *pkt,
			 u16 (*iter_func)(u16, void *, u16),
			 u16 data)
{
	/*
	 * Don't care about the performance hit of linearizing,
	 * Checksum should not be used on high-speed interfaces anyway.
	 */
	if (unlikely(is_erronous(pkt)))
		return -EPROTO;
	if (unlikely(skb_linearize(&pkt->skb) != 0)) {
		PKT_ERROR(pkt, "cfpkt_iterate: linearize failed\n");
		return -EPROTO;
	}
	return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
}
EXPORT_SYMBOL(cfpkt_iterate);
290
291int cfpkt_setlen(struct cfpkt *pkt, u16 len)
292{
293 struct sk_buff *skb = pkt_to_skb(pkt);
294
295
296 if (unlikely(is_erronous(pkt)))
297 return -EPROTO;
298
299 if (likely(len <= skb->len)) {
300 if (unlikely(skb->data_len))
301 ___pskb_trim(skb, len);
302 else
303 skb_trim(skb, len);
304
305 return cfpkt_getlen(pkt);
306 }
307
308 /* Need to expand SKB */
309 if (unlikely(!cfpkt_pad_trail(pkt, len - skb->len)))
310 PKT_ERROR(pkt, "cfpkt_setlen: skb_pad_trail failed\n");
311
312 return cfpkt_getlen(pkt);
313}
314EXPORT_SYMBOL(cfpkt_setlen);
315
316struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len)
317{
318 struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
319 if (unlikely(data != NULL))
320 cfpkt_add_body(pkt, data, len);
321 return pkt;
322}
323EXPORT_SYMBOL(cfpkt_create_uplink);
324
/*
 * Append the contents of 'addpkt' to 'dstpkt', reallocating 'dstpkt'
 * when its tailroom is insufficient.  'expectlen' hints at the total
 * tail space that will eventually be needed.  Consumes 'addpkt' on all
 * paths except the allocation-failure return.
 * NOTE(review): on allocation failure NULL is returned and neither
 * packet is freed, so 'addpkt' leaks -- confirm callers handle this.
 */
struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
			   struct cfpkt *addpkt,
			   u16 expectlen)
{
	struct sk_buff *dst = pkt_to_skb(dstpkt);
	struct sk_buff *add = pkt_to_skb(addpkt);
	u16 addlen = skb_headlen(add);
	u16 neededtailspace;
	struct sk_buff *tmp;
	u16 dstlen;
	u16 createlen;
	if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) {
		cfpkt_destroy(addpkt);
		return dstpkt;
	}
	/* Reserve for the full expected frame if larger than the data
	 * at hand. */
	if (expectlen > addlen)
		neededtailspace = expectlen;
	else
		neededtailspace = addlen;

	if (dst->tail + neededtailspace > dst->end) {
		/* Create a duplicate of 'dst' with more tail space */
		dstlen = skb_headlen(dst);
		createlen = dstlen + neededtailspace;
		tmp = pkt_to_skb(
			cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX));
		if (!tmp)
			return NULL;
		skb_set_tail_pointer(tmp, dstlen);
		tmp->len = dstlen;
		memcpy(tmp->data, dst->data, dstlen);
		cfpkt_destroy(dstpkt);
		dst = tmp;
	}
	/* Copy the appended data and advance tail/len by hand. */
	memcpy(skb_tail_pointer(dst), add->data, skb_headlen(add));
	cfpkt_destroy(addpkt);
	dst->tail += addlen;
	dst->len += addlen;
	return skb_to_pkt(dst);
}
EXPORT_SYMBOL(cfpkt_append);
366
/*
 * Split the packet at byte offset 'pos': trim the original down to
 * 'pos' bytes and return a newly allocated packet holding the rest.
 * Returns NULL on error or allocation failure.
 */
struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
{
	struct sk_buff *skb2;
	struct sk_buff *skb = pkt_to_skb(pkt);
	u8 *split = skb->data + pos;
	u16 len2nd = skb_tail_pointer(skb) - split;

	if (unlikely(is_erronous(pkt)))
		return NULL;

	/* Bounds check before any of the pointers above are used. */
	if (skb->data + pos > skb_tail_pointer(skb)) {
		PKT_ERROR(pkt,
			  "cfpkt_split: trying to split beyond end of packet");
		return NULL;
	}

	/* Create a new packet for the second part of the data */
	skb2 = pkt_to_skb(
		cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX,
				 PKT_PREFIX));

	if (skb2 == NULL)
		return NULL;

	/* Reduce the length of the original packet */
	skb_set_tail_pointer(skb, pos);
	skb->len = pos;

	memcpy(skb2->data, split, len2nd);
	skb2->tail += len2nd;
	skb2->len += len2nd;
	return skb_to_pkt(skb2);
}
EXPORT_SYMBOL(cfpkt_split);
401
/*
 * Render a human-readable dump of the packet (header summary plus up to
 * 300 payload bytes in hex) into 'buf'.  Returns 'buf', or NULL when
 * 'buflen' cannot hold even the header portion.
 */
char *cfpkt_log_pkt(struct cfpkt *pkt, char *buf, int buflen)
{
	struct sk_buff *skb = pkt_to_skb(pkt);
	char *p = buf;
	int i;

	/*
	 * Sanity check buffer length, it needs to be at least as large as
	 * the header info: ~=50+ bytes
	 */
	if (buflen < 50)
		return NULL;

	snprintf(buf, buflen, "%s: pkt:%p len:%ld(%ld+%ld) {%ld,%ld} data: [",
		 is_erronous(pkt) ? "ERRONOUS-SKB" :
		 (skb->data_len != 0 ? "COMPLEX-SKB" : "SKB"),
		 skb,
		 (long) skb->len,
		 (long) (skb_tail_pointer(skb) - skb->data),
		 (long) skb->data_len,
		 (long) (skb->data - skb->head),
		 (long) (skb_tail_pointer(skb) - skb->head));
	p = buf + strlen(buf);

	/* Dump payload bytes, stopping early with "..." near the end of
	 * the caller's buffer. */
	for (i = 0; i < skb_tail_pointer(skb) - skb->data && i < 300; i++) {
		if (p > buf + buflen - 10) {
			sprintf(p, "...");
			p = buf + strlen(buf);
			break;
		}
		sprintf(p, "%02x,", skb->data[i]);
		p = buf + strlen(buf);
	}
	sprintf(p, "]\n");
	return buf;
}
EXPORT_SYMBOL(cfpkt_log_pkt);
439
/*
 * Reserve 'buflen' bytes at the packet tail for the caller to write
 * into directly; '*buf' is set to the start of the reserved area.
 * Returns 1 on success, -EPROTO on failure.
 */
int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen)
{
	struct sk_buff *skb = pkt_to_skb(pkt);
	struct sk_buff *lastskb;

	caif_assert(buf != NULL);
	if (unlikely(is_erronous(pkt)))
		return -EPROTO;
	/* Make sure SKB is writable */
	if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
		PKT_ERROR(pkt, "cfpkt_raw_append: skb_cow_data failed\n");
		return -EPROTO;
	}

	/* Direct tail writes require linear data. */
	if (unlikely(skb_linearize(skb) != 0)) {
		PKT_ERROR(pkt, "cfpkt_raw_append: linearize failed\n");
		return -EPROTO;
	}

	if (unlikely(skb_tailroom(skb) < buflen)) {
		PKT_ERROR(pkt, "cfpkt_raw_append: buffer too short - failed\n");
		return -EPROTO;
	}

	*buf = skb_put(skb, buflen);
	return 1;
}
EXPORT_SYMBOL(cfpkt_raw_append);
468
469int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen)
470{
471 struct sk_buff *skb = pkt_to_skb(pkt);
472
473 caif_assert(buf != NULL);
474 if (unlikely(is_erronous(pkt)))
475 return -EPROTO;
476
477 if (unlikely(buflen > skb->len)) {
478 PKT_ERROR(pkt, "cfpkt_raw_extract: buflen too large "
479 "- failed\n");
480 return -EPROTO;
481 }
482
483 if (unlikely(buflen > skb_headlen(skb))) {
484 if (unlikely(skb_linearize(skb) != 0)) {
485 PKT_ERROR(pkt, "cfpkt_raw_extract: linearize failed\n");
486 return -EPROTO;
487 }
488 }
489
490 *buf = skb->data;
491 skb_pull(skb, buflen);
492
493 return 1;
494}
495EXPORT_SYMBOL(cfpkt_raw_extract);
496
497inline bool cfpkt_erroneous(struct cfpkt *pkt)
498{
499 return cfpkt_priv(pkt)->erronous;
500}
501EXPORT_SYMBOL(cfpkt_erroneous);
502
503struct cfpktq *cfpktq_create(void)
504{
505 struct cfpktq *q = kmalloc(sizeof(struct cfpktq), GFP_ATOMIC);
506 if (!q)
507 return NULL;
508 skb_queue_head_init(&q->head);
509 atomic_set(&q->count, 0);
510 spin_lock_init(&q->lock);
511 return q;
512}
513EXPORT_SYMBOL(cfpktq_create);
514
/*
 * Enqueue a packet at the tail of the queue.
 * NOTE(review): 'prio' is currently unused, and the counter is bumped
 * before the insert (outside the lock), so cfpkt_qcount() can briefly
 * exceed the actual queue length -- confirm readers tolerate this.
 */
void cfpkt_queue(struct cfpktq *pktq, struct cfpkt *pkt, unsigned short prio)
{
	atomic_inc(&pktq->count);
	spin_lock(&pktq->lock);
	skb_queue_tail(&pktq->head, pkt_to_skb(pkt));
	spin_unlock(&pktq->lock);

}
EXPORT_SYMBOL(cfpkt_queue);
524
525struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq)
526{
527 struct cfpkt *tmp;
528 spin_lock(&pktq->lock);
529 tmp = skb_to_pkt(skb_peek(&pktq->head));
530 spin_unlock(&pktq->lock);
531 return tmp;
532}
533EXPORT_SYMBOL(cfpkt_qpeek);
534
535struct cfpkt *cfpkt_dequeue(struct cfpktq *pktq)
536{
537 struct cfpkt *pkt;
538 spin_lock(&pktq->lock);
539 pkt = skb_to_pkt(skb_dequeue(&pktq->head));
540 if (pkt) {
541 atomic_dec(&pktq->count);
542 caif_assert(atomic_read(&pktq->count) >= 0);
543 }
544 spin_unlock(&pktq->lock);
545 return pkt;
546}
547EXPORT_SYMBOL(cfpkt_dequeue);
548
/* Number of packets currently in the queue (see note in cfpkt_queue
 * about a brief over-count during enqueue). */
int cfpkt_qcount(struct cfpktq *pktq)
{
	return atomic_read(&pktq->count);
}
EXPORT_SYMBOL(cfpkt_qcount);
554
555struct cfpkt *cfpkt_clone_release(struct cfpkt *pkt)
556{
557 struct cfpkt *clone;
558 clone = skb_to_pkt(skb_clone(pkt_to_skb(pkt), GFP_ATOMIC));
559 /* Free original packet. */
560 cfpkt_destroy(pkt);
561 if (!clone)
562 return NULL;
563 return clone;
564}
565EXPORT_SYMBOL(cfpkt_clone_release);
566
/* Access the CAIF payload routing info stored in the skb control
 * buffer (overlays struct cfpkt_priv_data's dev_info). */
struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
{
	return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
}
EXPORT_SYMBOL(cfpkt_info);
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
new file mode 100644
index 000000000000..cd2830fec935
--- /dev/null
+++ b/net/caif/cfrfml.c
@@ -0,0 +1,108 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/stddef.h>
8#include <linux/spinlock.h>
9#include <linux/slab.h>
10#include <net/caif/caif_layer.h>
11#include <net/caif/cfsrvl.h>
12#include <net/caif/cfpkt.h>
13
14#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
15
16#define RFM_SEGMENTATION_BIT 0x01
17#define RFM_PAYLOAD 0x00
18#define RFM_CMD_BIT 0x80
19#define RFM_FLOW_OFF 0x81
20#define RFM_FLOW_ON 0x80
21#define RFM_SET_PIN 0x82
22#define RFM_CTRL_PKT_SIZE 1
23
24static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
25static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);
26static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl);
27
28struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info)
29{
30 struct cfsrvl *rfm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
31 if (!rfm) {
32 pr_warning("CAIF: %s(): Out of memory\n", __func__);
33 return NULL;
34 }
35 caif_assert(offsetof(struct cfsrvl, layer) == 0);
36 memset(rfm, 0, sizeof(struct cfsrvl));
37 cfsrvl_init(rfm, channel_id, dev_info);
38 rfm->layer.modemcmd = cfservl_modemcmd;
39 rfm->layer.receive = cfrfml_receive;
40 rfm->layer.transmit = cfrfml_transmit;
41 snprintf(rfm->layer.name, CAIF_LAYER_NAME_SZ, "rfm%d", channel_id);
42 return &rfm->layer;
43}
44
/* RFM does not support modem commands; always reject. */
static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
{
	return -EPROTO;
}
49
50static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
51{
52 u8 tmp;
53 bool segmented;
54 int ret;
55 caif_assert(layr->up != NULL);
56 caif_assert(layr->receive != NULL);
57
58 /*
59 * RFM is taking care of segmentation and stripping of
60 * segmentation bit.
61 */
62 if (cfpkt_extr_head(pkt, &tmp, 1) < 0) {
63 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
64 cfpkt_destroy(pkt);
65 return -EPROTO;
66 }
67 segmented = tmp & RFM_SEGMENTATION_BIT;
68 caif_assert(!segmented);
69
70 ret = layr->up->receive(layr->up, pkt);
71 return ret;
72}
73
74static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
75{
76 u8 tmp = 0;
77 int ret;
78 struct cfsrvl *service = container_obj(layr);
79
80 caif_assert(layr->dn != NULL);
81 caif_assert(layr->dn->transmit != NULL);
82
83 if (!cfsrvl_ready(service, &ret))
84 return ret;
85
86 if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
87 pr_err("CAIF: %s():Packet too large - size=%d\n",
88 __func__, cfpkt_getlen(pkt));
89 return -EOVERFLOW;
90 }
91 if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
92 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
93 return -EPROTO;
94 }
95
96 /* Add info for MUX-layer to route the packet out. */
97 cfpkt_info(pkt)->channel_id = service->layer.id;
98 /*
99 * To optimize alignment, we add up the size of CAIF header before
100 * payload.
101 */
102 cfpkt_info(pkt)->hdr_len = 1;
103 cfpkt_info(pkt)->dev_info = &service->dev_info;
104 ret = layr->dn->transmit(layr->dn, pkt);
105 if (ret < 0)
106 cfpkt_extr_head(pkt, &tmp, 1);
107 return ret;
108}
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
new file mode 100644
index 000000000000..06029ea2da2f
--- /dev/null
+++ b/net/caif/cfserl.c
@@ -0,0 +1,192 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/stddef.h>
8#include <linux/spinlock.h>
9#include <linux/slab.h>
10#include <net/caif/caif_layer.h>
11#include <net/caif/cfpkt.h>
12#include <net/caif/cfserl.h>
13
#define container_obj(layr) ((struct cfserl *) layr)

#define CFSERL_STX 0x02			/* start-of-frame delimiter byte */
#define CAIF_MINIUM_PACKET_SIZE 4
/* Serial framing layer: reassembles length-prefixed frames from a byte
 * stream, optionally delimited by an STX byte. */
struct cfserl {
	struct cflayer layer;		/* must be first (container_obj casts) */
	struct cfpkt *incomplete_frm;	/* buffered partial frame, if any */
	/* Protects parallel processing of incoming packets */
	spinlock_t sync;
	bool usestx;			/* frames are prefixed with CFSERL_STX */
};
#define STXLEN(layr) (layr->usestx ? 1 : 0)
26
27static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
28static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
29static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
30 int phyid);
31
32struct cflayer *cfserl_create(int type, int instance, bool use_stx)
33{
34 struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC);
35 if (!this) {
36 pr_warning("CAIF: %s(): Out of memory\n", __func__);
37 return NULL;
38 }
39 caif_assert(offsetof(struct cfserl, layer) == 0);
40 memset(this, 0, sizeof(struct cfserl));
41 this->layer.receive = cfserl_receive;
42 this->layer.transmit = cfserl_transmit;
43 this->layer.ctrlcmd = cfserl_ctrlcmd;
44 this->layer.type = type;
45 this->usestx = use_stx;
46 spin_lock_init(&this->sync);
47 snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1");
48 return &this->layer;
49}
50
/*
 * Reassemble length-prefixed frames from an incoming byte stream.
 * New data is appended to any buffered partial frame; each complete
 * frame is split off and delivered upwards, and leftover bytes are
 * stashed in layr->incomplete_frm for the next call.
 */
static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
{
	struct cfserl *layr = container_obj(l);
	u16 pkt_len;
	struct cfpkt *pkt = NULL;
	struct cfpkt *tail_pkt = NULL;
	u8 tmp8;
	u16 tmp;
	u8 stx = CFSERL_STX;
	int ret;
	u16 expectlen = 0;
	caif_assert(newpkt != NULL);
	spin_lock(&layr->sync);

	/* Merge the new data with any buffered partial frame. */
	if (layr->incomplete_frm != NULL) {

		layr->incomplete_frm =
		    cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
		pkt = layr->incomplete_frm;
	} else {
		pkt = newpkt;
	}
	layr->incomplete_frm = NULL;

	do {
		/* Search for STX at start of pkt if STX is used */
		if (layr->usestx) {
			cfpkt_extr_head(pkt, &tmp8, 1);
			if (tmp8 != CFSERL_STX) {
				/* Resynchronize: discard bytes until the
				 * next STX delimiter is found. */
				while (cfpkt_more(pkt)
				       && tmp8 != CFSERL_STX) {
					cfpkt_extr_head(pkt, &tmp8, 1);
				}
				if (!cfpkt_more(pkt)) {
					cfpkt_destroy(pkt);
					layr->incomplete_frm = NULL;
					spin_unlock(&layr->sync);
					return -EPROTO;
				}
			}
		}

		pkt_len = cfpkt_getlen(pkt);

		/*
		 * pkt_len is the accumulated length of the packet data
		 * we have received so far.
		 * Exit if frame doesn't hold length.
		 */

		if (pkt_len < 2) {
			/* Re-insert the consumed STX before buffering. */
			if (layr->usestx)
				cfpkt_add_head(pkt, &stx, 1);
			layr->incomplete_frm = pkt;
			spin_unlock(&layr->sync);
			return 0;
		}

		/*
		 * Find length of frame.
		 * expectlen is the length we need for a full frame.
		 */
		cfpkt_peek_head(pkt, &tmp, 2);
		expectlen = le16_to_cpu(tmp) + 2;
		/*
		 * Frame error handling
		 */
		if (expectlen < CAIF_MINIUM_PACKET_SIZE
		    || expectlen > CAIF_MAX_FRAMESIZE) {
			if (!layr->usestx) {
				/* No delimiter to resync on: drop it all. */
				if (pkt != NULL)
					cfpkt_destroy(pkt);
				layr->incomplete_frm = NULL;
				expectlen = 0;
				spin_unlock(&layr->sync);
				return -EPROTO;
			}
			/* With STX we can hunt for the next frame start. */
			continue;
		}

		if (pkt_len < expectlen) {
			/* Too little received data */
			if (layr->usestx)
				cfpkt_add_head(pkt, &stx, 1);
			layr->incomplete_frm = pkt;
			spin_unlock(&layr->sync);
			return 0;
		}

		/*
		 * Enough data for at least one frame.
		 * Split the frame, if too long
		 */
		if (pkt_len > expectlen)
			tail_pkt = cfpkt_split(pkt, expectlen);
		else
			tail_pkt = NULL;

		/* Send the first part of packet upwards.*/
		spin_unlock(&layr->sync);
		ret = layr->layer.up->receive(layr->layer.up, pkt);
		spin_lock(&layr->sync);
		if (ret == -EILSEQ) {
			if (layr->usestx) {
				/* Glue the tail back on and rescan for
				 * the next STX. */
				if (tail_pkt != NULL)
					pkt = cfpkt_append(pkt, tail_pkt, 0);

				/* Start search for next STX if frame failed */
				continue;
			} else {
				cfpkt_destroy(pkt);
				pkt = NULL;
			}
		}

		pkt = tail_pkt;

	} while (pkt != NULL);

	spin_unlock(&layr->sync);
	return 0;
}
173
174static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt)
175{
176 struct cfserl *layr = container_obj(layer);
177 int ret;
178 u8 tmp8 = CFSERL_STX;
179 if (layr->usestx)
180 cfpkt_add_head(newpkt, &tmp8, 1);
181 ret = layer->dn->transmit(layer->dn, newpkt);
182 if (ret < 0)
183 cfpkt_extr_head(newpkt, &tmp8, 1);
184
185 return ret;
186}
187
/* Pass flow/link control indications straight up the stack. */
static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid)
{
	layr->up->ctrlcmd(layr->up, ctrl, phyid);
}
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
new file mode 100644
index 000000000000..d470c51c6431
--- /dev/null
+++ b/net/caif/cfsrvl.c
@@ -0,0 +1,185 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/errno.h>
10#include <linux/slab.h>
11#include <net/caif/caif_layer.h>
12#include <net/caif/cfsrvl.h>
13#include <net/caif/cfpkt.h>
14
15#define SRVL_CTRL_PKT_SIZE 1
16#define SRVL_FLOW_OFF 0x81
17#define SRVL_FLOW_ON 0x80
18#define SRVL_SET_PIN 0x82
19#define SRVL_CTRL_PKT_SIZE 1
20
21#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
22
/*
 * Handle control indications for a service layer: track connection and
 * flow-control state (modem-side and phy-side gates) and forward the
 * relevant indications upwards.  Flow-on is only propagated when the
 * other gate is already open, so the upper layer sees a single
 * combined flow state.
 */
static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			    int phyid)
{
	struct cfsrvl *service = container_obj(layr);
	caif_assert(layr->up != NULL);
	caif_assert(layr->up->ctrlcmd != NULL);
	switch (ctrl) {
	case CAIF_CTRLCMD_INIT_RSP:
		service->open = true;
		layr->up->ctrlcmd(layr->up, ctrl, phyid);
		break;
	case CAIF_CTRLCMD_DEINIT_RSP:
	case CAIF_CTRLCMD_INIT_FAIL_RSP:
		service->open = false;
		layr->up->ctrlcmd(layr->up, ctrl, phyid);
		break;
	case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
		/* Only react to our own physical interface. */
		if (phyid != service->dev_info.id)
			break;
		if (service->modem_flow_on)
			layr->up->ctrlcmd(layr->up,
					  CAIF_CTRLCMD_FLOW_OFF_IND, phyid);
		service->phy_flow_on = false;
		break;
	case _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND:
		if (phyid != service->dev_info.id)
			return;
		if (service->modem_flow_on) {
			layr->up->ctrlcmd(layr->up,
					  CAIF_CTRLCMD_FLOW_ON_IND,
					  phyid);
		}
		service->phy_flow_on = true;
		break;
	case CAIF_CTRLCMD_FLOW_OFF_IND:
		if (service->phy_flow_on) {
			layr->up->ctrlcmd(layr->up,
					  CAIF_CTRLCMD_FLOW_OFF_IND, phyid);
		}
		service->modem_flow_on = false;
		break;
	case CAIF_CTRLCMD_FLOW_ON_IND:
		if (service->phy_flow_on) {
			layr->up->ctrlcmd(layr->up,
					  CAIF_CTRLCMD_FLOW_ON_IND, phyid);
		}
		service->modem_flow_on = true;
		break;
	case _CAIF_CTRLCMD_PHYIF_DOWN_IND:
		/* In case interface is down, let's fake a remove shutdown */
		layr->up->ctrlcmd(layr->up,
				  CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, phyid);
		break;
	case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
		layr->up->ctrlcmd(layr->up, ctrl, phyid);
		break;
	default:
		pr_warning("CAIF: %s(): "
			   "Unexpected ctrl in cfsrvl (%d)\n", __func__, ctrl);
		/* We have both modem and phy flow on, send flow on */
		layr->up->ctrlcmd(layr->up, ctrl, phyid);
		service->phy_flow_on = true;
		break;
	}
}
88
89static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
90{
91 struct cfsrvl *service = container_obj(layr);
92 caif_assert(layr != NULL);
93 caif_assert(layr->dn != NULL);
94 caif_assert(layr->dn->transmit != NULL);
95 switch (ctrl) {
96 case CAIF_MODEMCMD_FLOW_ON_REQ:
97 {
98 struct cfpkt *pkt;
99 struct caif_payload_info *info;
100 u8 flow_on = SRVL_FLOW_ON;
101 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
102 if (!pkt) {
103 pr_warning("CAIF: %s(): Out of memory\n",
104 __func__);
105 return -ENOMEM;
106 }
107
108 if (cfpkt_add_head(pkt, &flow_on, 1) < 0) {
109 pr_err("CAIF: %s(): Packet is erroneous!\n",
110 __func__);
111 cfpkt_destroy(pkt);
112 return -EPROTO;
113 }
114 info = cfpkt_info(pkt);
115 info->channel_id = service->layer.id;
116 info->hdr_len = 1;
117 info->dev_info = &service->dev_info;
118 return layr->dn->transmit(layr->dn, pkt);
119 }
120 case CAIF_MODEMCMD_FLOW_OFF_REQ:
121 {
122 struct cfpkt *pkt;
123 struct caif_payload_info *info;
124 u8 flow_off = SRVL_FLOW_OFF;
125 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
126 if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
127 pr_err("CAIF: %s(): Packet is erroneous!\n",
128 __func__);
129 cfpkt_destroy(pkt);
130 return -EPROTO;
131 }
132 info = cfpkt_info(pkt);
133 info->channel_id = service->layer.id;
134 info->hdr_len = 1;
135 info->dev_info = &service->dev_info;
136 return layr->dn->transmit(layr->dn, pkt);
137 }
138 default:
139 break;
140 }
141 return -EINVAL;
142}
143
/* Free a service layer previously allocated by a cf*_create(). */
void cfservl_destroy(struct cflayer *layer)
{
	kfree(layer);
}
148
/*
 * Common initialisation for all CAIF service layers.  Callers must
 * embed struct cfsrvl with 'layer' as the first member (asserted) and
 * set receive/transmit themselves after this returns.
 */
void cfsrvl_init(struct cfsrvl *service,
		 u8 channel_id,
		 struct dev_info *dev_info)
{
	caif_assert(offsetof(struct cfsrvl, layer) == 0);
	service->open = false;		/* not connected until INIT_RSP */
	service->modem_flow_on = true;	/* both flow gates start open */
	service->phy_flow_on = true;
	service->layer.id = channel_id;
	service->layer.ctrlcmd = cfservl_ctrlcmd;
	service->layer.modemcmd = cfservl_modemcmd;
	service->dev_info = *dev_info;	/* copied by value */
}
162
163bool cfsrvl_ready(struct cfsrvl *service, int *err)
164{
165 if (service->open && service->modem_flow_on && service->phy_flow_on)
166 return true;
167 if (!service->open) {
168 *err = -ENOTCONN;
169 return false;
170 }
171 caif_assert(!(service->modem_flow_on && service->phy_flow_on));
172 *err = -EAGAIN;
173 return false;
174}
/* Return the physical interface id this service layer is bound to. */
u8 cfsrvl_getphyid(struct cflayer *layer)
{
	struct cfsrvl *servl = container_obj(layer);
	return servl->dev_info.id;
}
180
/* True when the layer is bound to the given physical interface id. */
bool cfsrvl_phyid_match(struct cflayer *layer, int phyid)
{
	struct cfsrvl *servl = container_obj(layer);
	return servl->dev_info.id == phyid;
}
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
new file mode 100644
index 000000000000..5fd2c9ea8b42
--- /dev/null
+++ b/net/caif/cfutill.c
@@ -0,0 +1,115 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/slab.h>
10#include <linux/errno.h>
11#include <net/caif/caif_layer.h>
12#include <net/caif/cfsrvl.h>
13#include <net/caif/cfpkt.h>
14
15#define container_obj(layr) ((struct cfsrvl *) layr)
16#define UTIL_PAYLOAD 0x00
17#define UTIL_CMD_BIT 0x80
18#define UTIL_REMOTE_SHUTDOWN 0x82
19#define UTIL_FLOW_OFF 0x81
20#define UTIL_FLOW_ON 0x80
21#define UTIL_CTRL_PKT_SIZE 1
22static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt);
23static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt);
24
25struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info)
26{
27 struct cfsrvl *util = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
28 if (!util) {
29 pr_warning("CAIF: %s(): Out of memory\n", __func__);
30 return NULL;
31 }
32 caif_assert(offsetof(struct cfsrvl, layer) == 0);
33 memset(util, 0, sizeof(struct cfsrvl));
34 cfsrvl_init(util, channel_id, dev_info);
35 util->layer.receive = cfutill_receive;
36 util->layer.transmit = cfutill_transmit;
37 snprintf(util->layer.name, CAIF_LAYER_NAME_SZ - 1, "util1");
38 return &util->layer;
39}
40
/*
 * Receive a utility-service frame: the first byte is a command.
 * Payload frames are passed upwards; flow-control and remote-shutdown
 * commands update state, are signalled via ctrlcmd, and are consumed.
 */
static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	u8 cmd = -1;
	struct cfsrvl *service = container_obj(layr);
	caif_assert(layr != NULL);
	caif_assert(layr->up != NULL);
	caif_assert(layr->up->receive != NULL);
	caif_assert(layr->up->ctrlcmd != NULL);
	if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
		cfpkt_destroy(pkt);
		return -EPROTO;
	}

	switch (cmd) {
	case UTIL_PAYLOAD:
		/* Ownership of pkt passes to the upper layer. */
		return layr->up->receive(layr->up, pkt);
	case UTIL_FLOW_OFF:
		layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
		cfpkt_destroy(pkt);
		return 0;
	case UTIL_FLOW_ON:
		layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
		cfpkt_destroy(pkt);
		return 0;
	case UTIL_REMOTE_SHUTDOWN:	/* Remote Shutdown Request */
		pr_err("CAIF: %s(): REMOTE SHUTDOWN REQUEST RECEIVED\n",
		       __func__);
		layr->ctrlcmd(layr, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, 0);
		service->open = false;
		cfpkt_destroy(pkt);
		return 0;
	default:
		cfpkt_destroy(pkt);
		pr_warning("CAIF: %s(): Unknown service control %d (0x%x)\n",
			   __func__, cmd, cmd);
		return -EPROTO;
	}
}
80
81static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt)
82{
83 u8 zero = 0;
84 struct caif_payload_info *info;
85 int ret;
86 struct cfsrvl *service = container_obj(layr);
87 caif_assert(layr != NULL);
88 caif_assert(layr->dn != NULL);
89 caif_assert(layr->dn->transmit != NULL);
90 if (!cfsrvl_ready(service, &ret))
91 return ret;
92
93 if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
94 pr_err("CAIF: %s(): packet too large size=%d\n",
95 __func__, cfpkt_getlen(pkt));
96 return -EOVERFLOW;
97 }
98
99 cfpkt_add_head(pkt, &zero, 1);
100 /* Add info for MUX-layer to route the packet out. */
101 info = cfpkt_info(pkt);
102 info->channel_id = service->layer.id;
103 /*
104 * To optimize alignment, we add up the size of CAIF header before
105 * payload.
106 */
107 info->hdr_len = 1;
108 info->dev_info = &service->dev_info;
109 ret = layr->dn->transmit(layr->dn, pkt);
110 if (ret < 0) {
111 u32 tmp32;
112 cfpkt_extr_head(pkt, &tmp32, 4);
113 }
114 return ret;
115}
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
new file mode 100644
index 000000000000..0fd827f49491
--- /dev/null
+++ b/net/caif/cfveil.c
@@ -0,0 +1,107 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/stddef.h>
8#include <linux/slab.h>
9#include <net/caif/caif_layer.h>
10#include <net/caif/cfsrvl.h>
11#include <net/caif/cfpkt.h>
12
13#define VEI_PAYLOAD 0x00
14#define VEI_CMD_BIT 0x80
15#define VEI_FLOW_OFF 0x81
16#define VEI_FLOW_ON 0x80
17#define VEI_SET_PIN 0x82
18#define VEI_CTRL_PKT_SIZE 1
19#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
20
21static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt);
22static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt);
23
24struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info)
25{
26 struct cfsrvl *vei = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
27 if (!vei) {
28 pr_warning("CAIF: %s(): Out of memory\n", __func__);
29 return NULL;
30 }
31 caif_assert(offsetof(struct cfsrvl, layer) == 0);
32 memset(vei, 0, sizeof(struct cfsrvl));
33 cfsrvl_init(vei, channel_id, dev_info);
34 vei->layer.receive = cfvei_receive;
35 vei->layer.transmit = cfvei_transmit;
36 snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ - 1, "vei%d", channel_id);
37 return &vei->layer;
38}
39
/*
 * Receive handler for the VEI (AT) service layer.
 *
 * The first byte of the packet is a command: payload frames are passed
 * up the stack (ownership of pkt transfers upward), flow-control
 * commands are translated into ctrlcmd indications and the packet is
 * destroyed.  Returns 0 on success, -EPROTO on a malformed or unknown
 * frame (the packet is destroyed in both error paths).
 */
static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	u8 cmd;
	int ret;
	caif_assert(layr->up != NULL);
	caif_assert(layr->receive != NULL);
	caif_assert(layr->ctrlcmd != NULL);


	if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
		cfpkt_destroy(pkt);
		return -EPROTO;
	}
	switch (cmd) {
	case VEI_PAYLOAD:
		/* Payload: hand the packet to the layer above. */
		ret = layr->up->receive(layr->up, pkt);
		return ret;
	case VEI_FLOW_OFF:
		layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
		cfpkt_destroy(pkt);
		return 0;
	case VEI_FLOW_ON:
		layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
		cfpkt_destroy(pkt);
		return 0;
	case VEI_SET_PIN:	/* SET RS232 PIN */
		cfpkt_destroy(pkt);
		return 0;
	default:		/* unknown VEI command */
		pr_warning("CAIF: %s():Unknown VEI control packet %d (0x%x)!\n",
			   __func__, cmd, cmd);
		cfpkt_destroy(pkt);
		return -EPROTO;
	}
}
76
77static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
78{
79 u8 tmp = 0;
80 struct caif_payload_info *info;
81 int ret;
82 struct cfsrvl *service = container_obj(layr);
83 if (!cfsrvl_ready(service, &ret))
84 return ret;
85 caif_assert(layr->dn != NULL);
86 caif_assert(layr->dn->transmit != NULL);
87 if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
88 pr_warning("CAIF: %s(): Packet too large - size=%d\n",
89 __func__, cfpkt_getlen(pkt));
90 return -EOVERFLOW;
91 }
92
93 if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
94 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
95 return -EPROTO;
96 }
97
98 /* Add info-> for MUX-layer to route the packet out. */
99 info = cfpkt_info(pkt);
100 info->channel_id = service->layer.id;
101 info->hdr_len = 1;
102 info->dev_info = &service->dev_info;
103 ret = layr->dn->transmit(layr->dn, pkt);
104 if (ret < 0)
105 cfpkt_extr_head(pkt, &tmp, 1);
106 return ret;
107}
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
new file mode 100644
index 000000000000..89ad4ea239f1
--- /dev/null
+++ b/net/caif/cfvidl.c
@@ -0,0 +1,65 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/slab.h>
10#include <linux/errno.h>
11#include <net/caif/caif_layer.h>
12#include <net/caif/cfsrvl.h>
13#include <net/caif/cfpkt.h>
14
15#define container_obj(layr) ((struct cfsrvl *) layr)
16
17static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt);
18static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt);
19
20struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info)
21{
22 struct cfsrvl *vid = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
23 if (!vid) {
24 pr_warning("CAIF: %s(): Out of memory\n", __func__);
25 return NULL;
26 }
27 caif_assert(offsetof(struct cfsrvl, layer) == 0);
28
29 memset(vid, 0, sizeof(struct cfsrvl));
30 cfsrvl_init(vid, channel_id, dev_info);
31 vid->layer.receive = cfvidl_receive;
32 vid->layer.transmit = cfvidl_transmit;
33 snprintf(vid->layer.name, CAIF_LAYER_NAME_SZ - 1, "vid1");
34 return &vid->layer;
35}
36
/*
 * Receive handler for the video service layer.
 *
 * Strips the 4 byte video header and passes the remainder up the
 * stack.  Returns -EPROTO (and destroys the packet) if the header
 * cannot be extracted; otherwise the return value of the upper layer.
 */
static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	u32 videoheader;
	if (cfpkt_extr_head(pkt, &videoheader, 4) < 0) {
		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
		cfpkt_destroy(pkt);
		return -EPROTO;
	}
	/* Header value itself is discarded; only the payload goes up. */
	return layr->up->receive(layr->up, pkt);
}
47
48static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt)
49{
50 struct cfsrvl *service = container_obj(layr);
51 struct caif_payload_info *info;
52 u32 videoheader = 0;
53 int ret;
54 if (!cfsrvl_ready(service, &ret))
55 return ret;
56 cfpkt_add_head(pkt, &videoheader, 4);
57 /* Add info for MUX-layer to route the packet out */
58 info = cfpkt_info(pkt);
59 info->channel_id = service->layer.id;
60 info->dev_info = &service->dev_info;
61 ret = layr->dn->transmit(layr->dn, pkt);
62 if (ret < 0)
63 cfpkt_extr_head(pkt, &videoheader, 4);
64 return ret;
65}
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
new file mode 100644
index 000000000000..f622ff1d39ba
--- /dev/null
+++ b/net/caif/chnl_net.c
@@ -0,0 +1,451 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Authors: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * Daniel Martensson / Daniel.Martensson@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2
6 */
7
8#include <linux/version.h>
9#include <linux/fs.h>
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/netdevice.h>
13#include <linux/if_ether.h>
14#include <linux/moduleparam.h>
15#include <linux/ip.h>
16#include <linux/sched.h>
17#include <linux/sockios.h>
18#include <linux/caif/if_caif.h>
19#include <net/rtnetlink.h>
20#include <net/caif/caif_layer.h>
21#include <net/caif/cfcnfg.h>
22#include <net/caif/cfpkt.h>
23#include <net/caif/caif_dev.h>
24
25#define CAIF_CONNECT_TIMEOUT 30
26#define SIZE_MTU 1500
27#define SIZE_MTU_MAX 4080
28#define SIZE_MTU_MIN 68
29#define CAIF_NET_DEFAULT_QUEUE_LEN 500
30
31#undef pr_debug
32#define pr_debug pr_warning
33
34/*This list is protected by the rtnl lock. */
35static LIST_HEAD(chnl_net_list);
36
37MODULE_LICENSE("GPL");
38MODULE_ALIAS_RTNL_LINK("caif");
39
40struct chnl_net {
41 struct cflayer chnl;
42 struct net_device_stats stats;
43 struct caif_connect_request conn_req;
44 struct list_head list_field;
45 struct net_device *netdev;
46 char name[256];
47 wait_queue_head_t netmgmt_wq;
48 /* Flow status to remember and control the transmission. */
49 bool flowenabled;
50 bool pending_close;
51};
52
/*
 * Remove delete_node from chnl_net_list, but only if it is actually
 * linked there — a plain list_del() on an unlinked node would corrupt
 * the list.  Must be called under the rtnl lock, which protects
 * chnl_net_list.
 */
static void robust_list_del(struct list_head *delete_node)
{
	struct list_head *list_node;
	struct list_head *n;
	ASSERT_RTNL();
	list_for_each_safe(list_node, n, &chnl_net_list) {
		if (list_node == delete_node) {
			list_del(list_node);
			break;
		}
	}
}
65
/*
 * Receive callback for the CAIF channel: converts the incoming CAIF
 * packet to an sk_buff, tags it as IPv4 and injects it into the
 * network stack, then updates RX statistics.  Always returns 0 (err
 * is never set to anything else in this function).
 */
static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
{
	struct sk_buff *skb;
	struct chnl_net *priv = NULL;
	int pktlen;
	int err = 0;

	priv = container_of(layr, struct chnl_net, chnl);

	/* NOTE(review): container_of() on a valid layr can never yield
	 * NULL, so this check is effectively dead — confirm intent. */
	if (!priv)
		return -EINVAL;

	/* Get length of CAIF packet. */
	pktlen = cfpkt_getlen(pkt);

	/* cfpkt_tonative transfers packet ownership to the skb. */
	skb = (struct sk_buff *) cfpkt_tonative(pkt);
	/* Pass some minimum information and
	 * send the packet to the net stack.
	 */
	skb->dev = priv->netdev;
	skb->protocol = htons(ETH_P_IP);

	/* If we change the header in loop mode, the checksum is corrupted. */
	if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;

	/* FIXME: Drivers should call this in tasklet context. */
	if (in_interrupt())
		netif_rx(skb);
	else
		netif_rx_ni(skb);

	/* Update statistics. */
	priv->netdev->stats.rx_packets++;
	priv->netdev->stats.rx_bytes += pktlen;

	return err;
}
106
107static int delete_device(struct chnl_net *dev)
108{
109 ASSERT_RTNL();
110 if (dev->netdev)
111 unregister_netdevice(dev->netdev);
112 return 0;
113}
114
/*
 * Deferred close handler: walks chnl_net_list under the rtnl lock and
 * tears down every device that was flagged pending_close by the flow
 * control callback (device teardown needs rtnl and therefore cannot
 * run in the ctrlcmd context).
 */
static void close_work(struct work_struct *work)
{
	struct chnl_net *dev = NULL;
	struct list_head *list_node;
	struct list_head *_tmp;
	rtnl_lock();
	list_for_each_safe(list_node, _tmp, &chnl_net_list) {
		dev = list_entry(list_node, struct chnl_net, list_field);
		if (!dev->pending_close)
			continue;
		list_del(list_node);
		delete_device(dev);
	}
	rtnl_unlock();
}
/* Single shared work item; close_work scans the whole list anyway. */
static DECLARE_WORK(close_worker, close_work);
131
132static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
133 int phyid)
134{
135 struct chnl_net *priv;
136 pr_debug("CAIF: %s(): NET flowctrl func called flow: %s.\n",
137 __func__,
138 flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" :
139 flow == CAIF_CTRLCMD_INIT_RSP ? "INIT" :
140 flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" :
141 flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" :
142 flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" :
143 flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ?
144 "REMOTE_SHUTDOWN" : "UKNOWN CTRL COMMAND");
145
146 priv = container_of(layr, struct chnl_net, chnl);
147
148 switch (flow) {
149 case CAIF_CTRLCMD_FLOW_OFF_IND:
150 case CAIF_CTRLCMD_DEINIT_RSP:
151 case CAIF_CTRLCMD_INIT_FAIL_RSP:
152 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
153 priv->flowenabled = false;
154 netif_tx_disable(priv->netdev);
155 pr_warning("CAIF: %s(): done\n", __func__);
156 priv->pending_close = 1;
157 schedule_work(&close_worker);
158 break;
159 case CAIF_CTRLCMD_FLOW_ON_IND:
160 case CAIF_CTRLCMD_INIT_RSP:
161 priv->flowenabled = true;
162 netif_wake_queue(priv->netdev);
163 wake_up_interruptible(&priv->netmgmt_wq);
164 break;
165 default:
166 break;
167 }
168}
169
170static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
171{
172 struct chnl_net *priv;
173 struct cfpkt *pkt = NULL;
174 int len;
175 int result = -1;
176 /* Get our private data. */
177 priv = netdev_priv(dev);
178
179 if (skb->len > priv->netdev->mtu) {
180 pr_warning("CAIF: %s(): Size of skb exceeded MTU\n", __func__);
181 return -ENOSPC;
182 }
183
184 if (!priv->flowenabled) {
185 pr_debug("CAIF: %s(): dropping packets flow off\n", __func__);
186 return NETDEV_TX_BUSY;
187 }
188
189 if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
190 swap(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
191
192 /* Store original SKB length. */
193 len = skb->len;
194
195 pkt = cfpkt_fromnative(CAIF_DIR_OUT, (void *) skb);
196
197 pr_debug("CAIF: %s(): transmit inst %s %d,%p\n",
198 __func__, dev->name, priv->chnl.dn->id, &priv->chnl.dn);
199
200 /* Send the packet down the stack. */
201 result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
202 if (result) {
203 if (result == -EAGAIN)
204 result = NETDEV_TX_BUSY;
205 return result;
206 }
207
208 /* Update statistics. */
209 dev->stats.tx_packets++;
210 dev->stats.tx_bytes += len;
211
212 return NETDEV_TX_OK;
213}
214
215static int chnl_net_open(struct net_device *dev)
216{
217 struct chnl_net *priv = NULL;
218 int result = -1;
219 ASSERT_RTNL();
220
221 priv = netdev_priv(dev);
222 pr_debug("CAIF: %s(): dev name: %s\n", __func__, priv->name);
223
224 if (!priv) {
225 pr_debug("CAIF: %s(): chnl_net_open: no priv\n", __func__);
226 return -ENODEV;
227 }
228 result = caif_connect_client(&priv->conn_req, &priv->chnl);
229 if (result != 0) {
230 pr_debug("CAIF: %s(): err: "
231 "Unable to register and open device, Err:%d\n",
232 __func__,
233 result);
234 return -ENODEV;
235 }
236 result = wait_event_interruptible(priv->netmgmt_wq, priv->flowenabled);
237
238 if (result == -ERESTARTSYS) {
239 pr_debug("CAIF: %s(): wait_event_interruptible"
240 " woken by a signal\n", __func__);
241 return -ERESTARTSYS;
242 } else
243 pr_debug("CAIF: %s(): Flow on recieved\n", __func__);
244
245 return 0;
246}
247
/*
 * ndo_stop: disconnect the CAIF client channel and wait (interruptibly)
 * until flow is disabled.  Returns 0 on success or -EBUSY if the
 * disconnect request itself fails; an interrupted wait is only logged,
 * not propagated.
 */
static int chnl_net_stop(struct net_device *dev)
{
	struct chnl_net *priv;
	int result = -1;
	ASSERT_RTNL();
	priv = netdev_priv(dev);

	result = caif_disconnect_client(&priv->chnl);
	if (result != 0) {
		pr_debug("CAIF: %s(): chnl_net_stop: err: "
			 "Unable to STOP device, Err:%d\n",
			 __func__, result);
		return -EBUSY;
	}
	/* Wait for chnl_flowctrl_cb() to clear flowenabled. */
	result = wait_event_interruptible(priv->netmgmt_wq,
					  !priv->flowenabled);

	if (result == -ERESTARTSYS) {
		pr_debug("CAIF: %s(): wait_event_interruptible woken by"
			 " signal, signal_pending(current) = %d\n",
			 __func__,
			 signal_pending(current));
	} else {
		pr_debug("CAIF: %s(): disconnect received\n", __func__);

	}

	return 0;
}
277
/*
 * ndo_init: cache the interface name in the private area for later
 * logging.  dev->name (IFNAMSIZ) is far smaller than priv->name (256),
 * so the strncpy result is always NUL-terminated.  Always returns 0.
 */
static int chnl_net_init(struct net_device *dev)
{
	struct chnl_net *priv;
	ASSERT_RTNL();
	priv = netdev_priv(dev);
	strncpy(priv->name, dev->name, sizeof(priv->name));
	return 0;
}
286
/*
 * ndo_uninit: unlink this instance from chnl_net_list.  Uses
 * robust_list_del() because the node may already have been removed by
 * close_work(); requires the rtnl lock.
 */
static void chnl_net_uninit(struct net_device *dev)
{
	struct chnl_net *priv;
	ASSERT_RTNL();
	priv = netdev_priv(dev);
	robust_list_del(&priv->list_field);
}
294
/* Net device operations for the CAIF IP interface. */
static const struct net_device_ops netdev_ops = {
	.ndo_open = chnl_net_open,
	.ndo_stop = chnl_net_stop,
	.ndo_init = chnl_net_init,
	.ndo_uninit = chnl_net_uninit,
	.ndo_start_xmit = chnl_net_start_xmit,
};
302
/*
 * rtnl_link_ops setup callback: initialises a freshly allocated CAIF
 * net_device (point-to-point, no ARP) and its chnl_net private data
 * with datagram-connection defaults, then links it into
 * chnl_net_list.  Runs under the rtnl lock.
 */
static void ipcaif_net_setup(struct net_device *dev)
{
	struct chnl_net *priv;
	dev->netdev_ops = &netdev_ops;
	dev->destructor = free_netdev;
	dev->flags |= IFF_NOARP;
	dev->flags |= IFF_POINTOPOINT;
	dev->needed_headroom = CAIF_NEEDED_HEADROOM;
	dev->needed_tailroom = CAIF_NEEDED_TAILROOM;
	dev->mtu = SIZE_MTU;
	dev->tx_queue_len = CAIF_NET_DEFAULT_QUEUE_LEN;

	priv = netdev_priv(dev);
	priv->chnl.receive = chnl_recv_cb;
	priv->chnl.ctrlcmd = chnl_flowctrl_cb;
	priv->netdev = dev;
	priv->conn_req.protocol = CAIFPROTO_DATAGRAM;
	priv->conn_req.link_selector = CAIF_LINK_HIGH_BANDW;
	priv->conn_req.priority = CAIF_PRIO_LOW;
	/* Insert illegal value; a real id must come via netlink attrs. */
	priv->conn_req.sockaddr.u.dgm.connection_id = -1;
	priv->flowenabled = false;

	ASSERT_RTNL();
	init_waitqueue_head(&priv->netmgmt_wq);
	list_add(&priv->list_field, &chnl_net_list);
}
330
331
/*
 * rtnl_link_ops fill_info callback: dumps the connection id (under
 * both the IPV4 and IPV6 attributes — the driver keeps only one id)
 * and the loopback flag.  The NLA_PUT_* macros jump to
 * nla_put_failure when the skb runs out of room, yielding -EMSGSIZE.
 */
static int ipcaif_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct chnl_net *priv;
	u8 loop;
	priv = netdev_priv(dev);
	NLA_PUT_U32(skb, IFLA_CAIF_IPV4_CONNID,
		    priv->conn_req.sockaddr.u.dgm.connection_id);
	NLA_PUT_U32(skb, IFLA_CAIF_IPV6_CONNID,
		    priv->conn_req.sockaddr.u.dgm.connection_id);
	loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP;
	NLA_PUT_U8(skb, IFLA_CAIF_LOOPBACK, loop);


	return 0;
nla_put_failure:
	return -EMSGSIZE;

}
350
/*
 * Parse the IFLA_CAIF_* netlink attributes into a connect request.
 * Missing data[] leaves the defaults from ipcaif_net_setup() in place.
 * NOTE(review): when both IPV4 and IPV6 connid attributes are present
 * the IPV6 value silently overwrites the IPV4 one — confirm intended.
 */
static void caif_netlink_parms(struct nlattr *data[],
				struct caif_connect_request *conn_req)
{
	if (!data) {
		pr_warning("CAIF: %s: no params data found\n", __func__);
		return;
	}
	if (data[IFLA_CAIF_IPV4_CONNID])
		conn_req->sockaddr.u.dgm.connection_id =
			nla_get_u32(data[IFLA_CAIF_IPV4_CONNID]);
	if (data[IFLA_CAIF_IPV6_CONNID])
		conn_req->sockaddr.u.dgm.connection_id =
			nla_get_u32(data[IFLA_CAIF_IPV6_CONNID]);
	if (data[IFLA_CAIF_LOOPBACK]) {
		if (nla_get_u8(data[IFLA_CAIF_LOOPBACK]))
			conn_req->protocol = CAIFPROTO_DATAGRAM_LOOP;
		else
			conn_req->protocol = CAIFPROTO_DATAGRAM;
	}
}
371
372static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
373 struct nlattr *tb[], struct nlattr *data[])
374{
375 int ret;
376 struct chnl_net *caifdev;
377 ASSERT_RTNL();
378 caifdev = netdev_priv(dev);
379 caif_netlink_parms(data, &caifdev->conn_req);
380 ret = register_netdevice(dev);
381 if (ret)
382 pr_warning("CAIF: %s(): device rtml registration failed\n",
383 __func__);
384 return ret;
385}
386
/*
 * rtnl_link_ops changelink callback: re-parse the connection
 * parameters and announce the state change.  The new parameters take
 * effect on the next open.  Always returns 0.
 */
static int ipcaif_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[])
{
	struct chnl_net *caifdev;
	ASSERT_RTNL();
	caifdev = netdev_priv(dev);
	caif_netlink_parms(data, &caifdev->conn_req);
	netdev_state_change(dev);
	return 0;
}
397
398static size_t ipcaif_get_size(const struct net_device *dev)
399{
400 return
401 /* IFLA_CAIF_IPV4_CONNID */
402 nla_total_size(4) +
403 /* IFLA_CAIF_IPV6_CONNID */
404 nla_total_size(4) +
405 /* IFLA_CAIF_LOOPBACK */
406 nla_total_size(2) +
407 0;
408}
409
/* Validation policy for the IFLA_CAIF_* netlink attributes. */
static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
	[IFLA_CAIF_IPV4_CONNID] = { .type = NLA_U32 },
	[IFLA_CAIF_IPV6_CONNID] = { .type = NLA_U32 },
	[IFLA_CAIF_LOOPBACK] = { .type = NLA_U8 }
};
415
416
/* rtnetlink link operations: "ip link add type caif". */
static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
	.kind		= "caif",
	.priv_size	= sizeof(struct chnl_net),
	.setup		= ipcaif_net_setup,
	.maxtype	= IFLA_CAIF_MAX,
	.policy		= ipcaif_policy,
	.newlink	= ipcaif_newlink,
	.changelink	= ipcaif_changelink,
	.get_size	= ipcaif_get_size,
	.fill_info	= ipcaif_fill_info,

};
429
/* Module init: register the "caif" rtnetlink link type. */
static int __init chnl_init_module(void)
{
	return rtnl_link_register(&ipcaif_link_ops);
}
434
/*
 * Module exit: unregister the link type, then tear down any devices
 * still on chnl_net_list under the rtnl lock.
 */
static void __exit chnl_exit_module(void)
{
	struct chnl_net *dev = NULL;
	struct list_head *list_node;
	struct list_head *_tmp;
	rtnl_link_unregister(&ipcaif_link_ops);
	rtnl_lock();
	list_for_each_safe(list_node, _tmp, &chnl_net_list) {
		dev = list_entry(list_node, struct chnl_net, list_field);
		list_del(list_node);
		delete_device(dev);
	}
	rtnl_unlock();
}
449
450module_init(chnl_init_module);
451module_exit(chnl_exit_module);
diff --git a/net/can/bcm.c b/net/can/bcm.c
index e32af52238a2..907dc871fac8 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -56,6 +56,7 @@
56#include <linux/can.h> 56#include <linux/can.h>
57#include <linux/can/core.h> 57#include <linux/can/core.h>
58#include <linux/can/bcm.h> 58#include <linux/can/bcm.h>
59#include <linux/slab.h>
59#include <net/sock.h> 60#include <net/sock.h>
60#include <net/net_namespace.h> 61#include <net/net_namespace.h>
61 62
@@ -1478,6 +1479,9 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1478 struct sock *sk = sock->sk; 1479 struct sock *sk = sock->sk;
1479 struct bcm_sock *bo = bcm_sk(sk); 1480 struct bcm_sock *bo = bcm_sk(sk);
1480 1481
1482 if (len < sizeof(*addr))
1483 return -EINVAL;
1484
1481 if (bo->bound) 1485 if (bo->bound)
1482 return -EISCONN; 1486 return -EISCONN;
1483 1487
diff --git a/net/can/raw.c b/net/can/raw.c
index abca920440b5..da99cf153b33 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -45,6 +45,7 @@
45#include <linux/init.h> 45#include <linux/init.h>
46#include <linux/uio.h> 46#include <linux/uio.h>
47#include <linux/net.h> 47#include <linux/net.h>
48#include <linux/slab.h>
48#include <linux/netdevice.h> 49#include <linux/netdevice.h>
49#include <linux/socket.h> 50#include <linux/socket.h>
50#include <linux/if_arp.h> 51#include <linux/if_arp.h>
@@ -444,7 +445,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
444 return -EFAULT; 445 return -EFAULT;
445 } 446 }
446 } else if (count == 1) { 447 } else if (count == 1) {
447 if (copy_from_user(&sfilter, optval, optlen)) 448 if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
448 return -EFAULT; 449 return -EFAULT;
449 } 450 }
450 451
diff --git a/net/compat.c b/net/compat.c
index a1fb1b079a82..ec24d9edb025 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/gfp.h>
15#include <linux/fs.h> 16#include <linux/fs.h>
16#include <linux/types.h> 17#include <linux/types.h>
17#include <linux/file.h> 18#include <linux/file.h>
diff --git a/net/core/Makefile b/net/core/Makefile
index 08791ac3e05a..51c3eec850ef 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -7,7 +7,7 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
7 7
8obj-$(CONFIG_SYSCTL) += sysctl_net_core.o 8obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
9 9
10obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \ 10obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
11 neighbour.o rtnetlink.o utils.o link_watch.o filter.o 11 neighbour.o rtnetlink.o utils.o link_watch.o filter.o
12 12
13obj-$(CONFIG_XFRM) += flow.o 13obj-$(CONFIG_XFRM) += flow.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 95c2e0840d0d..2dccd4ee591b 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -48,6 +48,7 @@
48#include <linux/poll.h> 48#include <linux/poll.h>
49#include <linux/highmem.h> 49#include <linux/highmem.h>
50#include <linux/spinlock.h> 50#include <linux/spinlock.h>
51#include <linux/slab.h>
51 52
52#include <net/protocol.h> 53#include <net/protocol.h>
53#include <linux/skbuff.h> 54#include <linux/skbuff.h>
diff --git a/net/core/dev.c b/net/core/dev.c
index 17b168671501..b31d5d69a467 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -80,6 +80,7 @@
80#include <linux/types.h> 80#include <linux/types.h>
81#include <linux/kernel.h> 81#include <linux/kernel.h>
82#include <linux/hash.h> 82#include <linux/hash.h>
83#include <linux/slab.h>
83#include <linux/sched.h> 84#include <linux/sched.h>
84#include <linux/mutex.h> 85#include <linux/mutex.h>
85#include <linux/string.h> 86#include <linux/string.h>
@@ -129,6 +130,7 @@
129#include <linux/jhash.h> 130#include <linux/jhash.h>
130#include <linux/random.h> 131#include <linux/random.h>
131#include <trace/events/napi.h> 132#include <trace/events/napi.h>
133#include <linux/pci.h>
132 134
133#include "net-sysfs.h" 135#include "net-sysfs.h"
134 136
@@ -206,6 +208,20 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
206 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; 208 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
207} 209}
208 210
211static inline void rps_lock(struct softnet_data *sd)
212{
213#ifdef CONFIG_RPS
214 spin_lock(&sd->input_pkt_queue.lock);
215#endif
216}
217
218static inline void rps_unlock(struct softnet_data *sd)
219{
220#ifdef CONFIG_RPS
221 spin_unlock(&sd->input_pkt_queue.lock);
222#endif
223}
224
209/* Device list insertion */ 225/* Device list insertion */
210static int list_netdevice(struct net_device *dev) 226static int list_netdevice(struct net_device *dev)
211{ 227{
@@ -248,7 +264,7 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
248 * queue in the local softnet handler. 264 * queue in the local softnet handler.
249 */ 265 */
250 266
251DEFINE_PER_CPU(struct softnet_data, softnet_data); 267DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
252EXPORT_PER_CPU_SYMBOL(softnet_data); 268EXPORT_PER_CPU_SYMBOL(softnet_data);
253 269
254#ifdef CONFIG_LOCKDEP 270#ifdef CONFIG_LOCKDEP
@@ -772,14 +788,17 @@ EXPORT_SYMBOL(__dev_getfirstbyhwtype);
772 788
773struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) 789struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
774{ 790{
775 struct net_device *dev; 791 struct net_device *dev, *ret = NULL;
776 792
777 rtnl_lock(); 793 rcu_read_lock();
778 dev = __dev_getfirstbyhwtype(net, type); 794 for_each_netdev_rcu(net, dev)
779 if (dev) 795 if (dev->type == type) {
780 dev_hold(dev); 796 dev_hold(dev);
781 rtnl_unlock(); 797 ret = dev;
782 return dev; 798 break;
799 }
800 rcu_read_unlock();
801 return ret;
783} 802}
784EXPORT_SYMBOL(dev_getfirstbyhwtype); 803EXPORT_SYMBOL(dev_getfirstbyhwtype);
785 804
@@ -1084,9 +1103,9 @@ void netdev_state_change(struct net_device *dev)
1084} 1103}
1085EXPORT_SYMBOL(netdev_state_change); 1104EXPORT_SYMBOL(netdev_state_change);
1086 1105
1087void netdev_bonding_change(struct net_device *dev, unsigned long event) 1106int netdev_bonding_change(struct net_device *dev, unsigned long event)
1088{ 1107{
1089 call_netdevice_notifiers(event, dev); 1108 return call_netdevice_notifiers(event, dev);
1090} 1109}
1091EXPORT_SYMBOL(netdev_bonding_change); 1110EXPORT_SYMBOL(netdev_bonding_change);
1092 1111
@@ -1416,6 +1435,7 @@ EXPORT_SYMBOL(unregister_netdevice_notifier);
1416 1435
1417int call_netdevice_notifiers(unsigned long val, struct net_device *dev) 1436int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1418{ 1437{
1438 ASSERT_RTNL();
1419 return raw_notifier_call_chain(&netdev_chain, val, dev); 1439 return raw_notifier_call_chain(&netdev_chain, val, dev);
1420} 1440}
1421 1441
@@ -1783,18 +1803,27 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
1783 * 2. No high memory really exists on this machine. 1803 * 2. No high memory really exists on this machine.
1784 */ 1804 */
1785 1805
1786static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 1806static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1787{ 1807{
1788#ifdef CONFIG_HIGHMEM 1808#ifdef CONFIG_HIGHMEM
1789 int i; 1809 int i;
1810 if (!(dev->features & NETIF_F_HIGHDMA)) {
1811 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1812 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1813 return 1;
1814 }
1790 1815
1791 if (dev->features & NETIF_F_HIGHDMA) 1816 if (PCI_DMA_BUS_IS_PHYS) {
1792 return 0; 1817 struct device *pdev = dev->dev.parent;
1793
1794 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1795 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1796 return 1;
1797 1818
1819 if (!pdev)
1820 return 0;
1821 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1822 dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
1823 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
1824 return 1;
1825 }
1826 }
1798#endif 1827#endif
1799 return 0; 1828 return 0;
1800} 1829}
@@ -1852,6 +1881,17 @@ static int dev_gso_segment(struct sk_buff *skb)
1852 return 0; 1881 return 0;
1853} 1882}
1854 1883
1884/*
1885 * Try to orphan skb early, right before transmission by the device.
1886 * We cannot orphan skb if tx timestamp is requested, since
1887 * drivers need to call skb_tstamp_tx() to send the timestamp.
1888 */
1889static inline void skb_orphan_try(struct sk_buff *skb)
1890{
1891 if (!skb_tx(skb)->flags)
1892 skb_orphan(skb);
1893}
1894
1855int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 1895int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1856 struct netdev_queue *txq) 1896 struct netdev_queue *txq)
1857{ 1897{
@@ -1876,23 +1916,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1876 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 1916 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1877 skb_dst_drop(skb); 1917 skb_dst_drop(skb);
1878 1918
1919 skb_orphan_try(skb);
1879 rc = ops->ndo_start_xmit(skb, dev); 1920 rc = ops->ndo_start_xmit(skb, dev);
1880 if (rc == NETDEV_TX_OK) 1921 if (rc == NETDEV_TX_OK)
1881 txq_trans_update(txq); 1922 txq_trans_update(txq);
1882 /*
1883 * TODO: if skb_orphan() was called by
1884 * dev->hard_start_xmit() (for example, the unmodified
1885 * igb driver does that; bnx2 doesn't), then
1886 * skb_tx_software_timestamp() will be unable to send
1887 * back the time stamp.
1888 *
1889 * How can this be prevented? Always create another
1890 * reference to the socket before calling
1891 * dev->hard_start_xmit()? Prevent that skb_orphan()
1892 * does anything in dev->hard_start_xmit() by clearing
1893 * the skb destructor before the call and restoring it
1894 * afterwards, then doing the skb_orphan() ourselves?
1895 */
1896 return rc; 1923 return rc;
1897 } 1924 }
1898 1925
@@ -1910,6 +1937,7 @@ gso:
1910 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 1937 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1911 skb_dst_drop(nskb); 1938 skb_dst_drop(nskb);
1912 1939
1940 skb_orphan_try(nskb);
1913 rc = ops->ndo_start_xmit(nskb, dev); 1941 rc = ops->ndo_start_xmit(nskb, dev);
1914 if (unlikely(rc != NETDEV_TX_OK)) { 1942 if (unlikely(rc != NETDEV_TX_OK)) {
1915 if (rc & ~NETDEV_TX_MASK) 1943 if (rc & ~NETDEV_TX_MASK)
@@ -1947,7 +1975,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
1947 if (skb->sk && skb->sk->sk_hash) 1975 if (skb->sk && skb->sk->sk_hash)
1948 hash = skb->sk->sk_hash; 1976 hash = skb->sk->sk_hash;
1949 else 1977 else
1950 hash = skb->protocol; 1978 hash = (__force u16) skb->protocol;
1951 1979
1952 hash = jhash_1word(hash, hashrnd); 1980 hash = jhash_1word(hash, hashrnd);
1953 1981
@@ -1959,9 +1987,9 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
1959{ 1987{
1960 if (unlikely(queue_index >= dev->real_num_tx_queues)) { 1988 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
1961 if (net_ratelimit()) { 1989 if (net_ratelimit()) {
1962 netdev_warn(dev, "selects TX queue %d, but " 1990 pr_warning("%s selects TX queue %d, but "
1963 "real number of TX queues is %d\n", 1991 "real number of TX queues is %d\n",
1964 queue_index, dev->real_num_tx_queues); 1992 dev->name, queue_index, dev->real_num_tx_queues);
1965 } 1993 }
1966 return 0; 1994 return 0;
1967 } 1995 }
@@ -1987,7 +2015,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1987 if (dev->real_num_tx_queues > 1) 2015 if (dev->real_num_tx_queues > 1)
1988 queue_index = skb_tx_hash(dev, skb); 2016 queue_index = skb_tx_hash(dev, skb);
1989 2017
1990 if (sk && sk->sk_dst_cache) 2018 if (sk && rcu_dereference_check(sk->sk_dst_cache, 1))
1991 sk_tx_queue_set(sk, queue_index); 2019 sk_tx_queue_set(sk, queue_index);
1992 } 2020 }
1993 } 2021 }
@@ -2174,29 +2202,38 @@ int weight_p __read_mostly = 64; /* old backlog weight */
2174 2202
2175DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; 2203DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
2176 2204
2205#ifdef CONFIG_RPS
2206
2207/* One global table that all flow-based protocols share. */
2208struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
2209EXPORT_SYMBOL(rps_sock_flow_table);
2210
2177/* 2211/*
2178 * get_rps_cpu is called from netif_receive_skb and returns the target 2212 * get_rps_cpu is called from netif_receive_skb and returns the target
2179 * CPU from the RPS map of the receiving queue for a given skb. 2213 * CPU from the RPS map of the receiving queue for a given skb.
2214 * rcu_read_lock must be held on entry.
2180 */ 2215 */
2181static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb) 2216static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2217 struct rps_dev_flow **rflowp)
2182{ 2218{
2183 struct ipv6hdr *ip6; 2219 struct ipv6hdr *ip6;
2184 struct iphdr *ip; 2220 struct iphdr *ip;
2185 struct netdev_rx_queue *rxqueue; 2221 struct netdev_rx_queue *rxqueue;
2186 struct rps_map *map; 2222 struct rps_map *map;
2223 struct rps_dev_flow_table *flow_table;
2224 struct rps_sock_flow_table *sock_flow_table;
2187 int cpu = -1; 2225 int cpu = -1;
2188 u8 ip_proto; 2226 u8 ip_proto;
2227 u16 tcpu;
2189 u32 addr1, addr2, ports, ihl; 2228 u32 addr1, addr2, ports, ihl;
2190 2229
2191 rcu_read_lock();
2192
2193 if (skb_rx_queue_recorded(skb)) { 2230 if (skb_rx_queue_recorded(skb)) {
2194 u16 index = skb_get_rx_queue(skb); 2231 u16 index = skb_get_rx_queue(skb);
2195 if (unlikely(index >= dev->num_rx_queues)) { 2232 if (unlikely(index >= dev->num_rx_queues)) {
2196 if (net_ratelimit()) { 2233 if (net_ratelimit()) {
2197 netdev_warn(dev, "received packet on queue " 2234 pr_warning("%s received packet on queue "
2198 "%u, but number of RX queues is %u\n", 2235 "%u, but number of RX queues is %u\n",
2199 index, dev->num_rx_queues); 2236 dev->name, index, dev->num_rx_queues);
2200 } 2237 }
2201 goto done; 2238 goto done;
2202 } 2239 }
@@ -2204,7 +2241,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
2204 } else 2241 } else
2205 rxqueue = dev->_rx; 2242 rxqueue = dev->_rx;
2206 2243
2207 if (!rxqueue->rps_map) 2244 if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
2208 goto done; 2245 goto done;
2209 2246
2210 if (skb->rxhash) 2247 if (skb->rxhash)
@@ -2217,8 +2254,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
2217 2254
2218 ip = (struct iphdr *) skb->data; 2255 ip = (struct iphdr *) skb->data;
2219 ip_proto = ip->protocol; 2256 ip_proto = ip->protocol;
2220 addr1 = ip->saddr; 2257 addr1 = (__force u32) ip->saddr;
2221 addr2 = ip->daddr; 2258 addr2 = (__force u32) ip->daddr;
2222 ihl = ip->ihl; 2259 ihl = ip->ihl;
2223 break; 2260 break;
2224 case __constant_htons(ETH_P_IPV6): 2261 case __constant_htons(ETH_P_IPV6):
@@ -2227,8 +2264,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
2227 2264
2228 ip6 = (struct ipv6hdr *) skb->data; 2265 ip6 = (struct ipv6hdr *) skb->data;
2229 ip_proto = ip6->nexthdr; 2266 ip_proto = ip6->nexthdr;
2230 addr1 = ip6->saddr.s6_addr32[3]; 2267 addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2231 addr2 = ip6->daddr.s6_addr32[3]; 2268 addr2 = (__force u32) ip6->daddr.s6_addr32[3];
2232 ihl = (40 >> 2); 2269 ihl = (40 >> 2);
2233 break; 2270 break;
2234 default: 2271 default:
@@ -2243,22 +2280,72 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
2243 case IPPROTO_AH: 2280 case IPPROTO_AH:
2244 case IPPROTO_SCTP: 2281 case IPPROTO_SCTP:
2245 case IPPROTO_UDPLITE: 2282 case IPPROTO_UDPLITE:
2246 if (pskb_may_pull(skb, (ihl * 4) + 4)) 2283 if (pskb_may_pull(skb, (ihl * 4) + 4)) {
2247 ports = *((u32 *) (skb->data + (ihl * 4))); 2284 __be16 *hports = (__be16 *) (skb->data + (ihl * 4));
2285 u32 sport, dport;
2286
2287 sport = (__force u16) hports[0];
2288 dport = (__force u16) hports[1];
2289 if (dport < sport)
2290 swap(sport, dport);
2291 ports = (sport << 16) + dport;
2292 }
2248 break; 2293 break;
2249 2294
2250 default: 2295 default:
2251 break; 2296 break;
2252 } 2297 }
2253 2298
2299 /* get a consistent hash (same value on both flow directions) */
2300 if (addr2 < addr1)
2301 swap(addr1, addr2);
2254 skb->rxhash = jhash_3words(addr1, addr2, ports, hashrnd); 2302 skb->rxhash = jhash_3words(addr1, addr2, ports, hashrnd);
2255 if (!skb->rxhash) 2303 if (!skb->rxhash)
2256 skb->rxhash = 1; 2304 skb->rxhash = 1;
2257 2305
2258got_hash: 2306got_hash:
2307 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2308 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2309 if (flow_table && sock_flow_table) {
2310 u16 next_cpu;
2311 struct rps_dev_flow *rflow;
2312
2313 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2314 tcpu = rflow->cpu;
2315
2316 next_cpu = sock_flow_table->ents[skb->rxhash &
2317 sock_flow_table->mask];
2318
2319 /*
2320 * If the desired CPU (where last recvmsg was done) is
2321 * different from current CPU (one in the rx-queue flow
2322 * table entry), switch if one of the following holds:
2323 * - Current CPU is unset (equal to RPS_NO_CPU).
2324 * - Current CPU is offline.
2325 * - The current CPU's queue tail has advanced beyond the
2326 * last packet that was enqueued using this table entry.
2327 * This guarantees that all previous packets for the flow
2328 * have been dequeued, thus preserving in order delivery.
2329 */
2330 if (unlikely(tcpu != next_cpu) &&
2331 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2332 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2333 rflow->last_qtail)) >= 0)) {
2334 tcpu = rflow->cpu = next_cpu;
2335 if (tcpu != RPS_NO_CPU)
2336 rflow->last_qtail = per_cpu(softnet_data,
2337 tcpu).input_queue_head;
2338 }
2339 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2340 *rflowp = rflow;
2341 cpu = tcpu;
2342 goto done;
2343 }
2344 }
2345
2259 map = rcu_dereference(rxqueue->rps_map); 2346 map = rcu_dereference(rxqueue->rps_map);
2260 if (map) { 2347 if (map) {
2261 u16 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32]; 2348 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2262 2349
2263 if (cpu_online(tcpu)) { 2350 if (cpu_online(tcpu)) {
2264 cpu = tcpu; 2351 cpu = tcpu;
@@ -2267,72 +2354,78 @@ got_hash:
2267 } 2354 }
2268 2355
2269done: 2356done:
2270 rcu_read_unlock();
2271 return cpu; 2357 return cpu;
2272} 2358}
2273 2359
2274/*
2275 * This structure holds the per-CPU mask of CPUs for which IPIs are scheduled
2276 * to be sent to kick remote softirq processing. There are two masks since
2277 * the sending of IPIs must be done with interrupts enabled. The select field
2278 * indicates the current mask that enqueue_backlog uses to schedule IPIs.
2279 * select is flipped before net_rps_action is called while still under lock,
2280 * net_rps_action then uses the non-selected mask to send the IPIs and clears
2281 * it without conflicting with enqueue_backlog operation.
2282 */
2283struct rps_remote_softirq_cpus {
2284 cpumask_t mask[2];
2285 int select;
2286};
2287static DEFINE_PER_CPU(struct rps_remote_softirq_cpus, rps_remote_softirq_cpus);
2288
2289/* Called from hardirq (IPI) context */ 2360/* Called from hardirq (IPI) context */
2290static void trigger_softirq(void *data) 2361static void rps_trigger_softirq(void *data)
2291{ 2362{
2292 struct softnet_data *queue = data; 2363 struct softnet_data *sd = data;
2293 __napi_schedule(&queue->backlog); 2364
2365 __napi_schedule(&sd->backlog);
2294 __get_cpu_var(netdev_rx_stat).received_rps++; 2366 __get_cpu_var(netdev_rx_stat).received_rps++;
2295} 2367}
2296 2368
2369#endif /* CONFIG_RPS */
2370
2371/*
2372 * Check if this softnet_data structure is another cpu one
2373 * If yes, queue it to our IPI list and return 1
2374 * If no, return 0
2375 */
2376static int rps_ipi_queued(struct softnet_data *sd)
2377{
2378#ifdef CONFIG_RPS
2379 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2380
2381 if (sd != mysd) {
2382 sd->rps_ipi_next = mysd->rps_ipi_list;
2383 mysd->rps_ipi_list = sd;
2384
2385 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2386 return 1;
2387 }
2388#endif /* CONFIG_RPS */
2389 return 0;
2390}
2391
2297/* 2392/*
2298 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 2393 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
2299 * queue (may be a remote CPU queue). 2394 * queue (may be a remote CPU queue).
2300 */ 2395 */
2301static int enqueue_to_backlog(struct sk_buff *skb, int cpu) 2396static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2397 unsigned int *qtail)
2302{ 2398{
2303 struct softnet_data *queue; 2399 struct softnet_data *sd;
2304 unsigned long flags; 2400 unsigned long flags;
2305 2401
2306 queue = &per_cpu(softnet_data, cpu); 2402 sd = &per_cpu(softnet_data, cpu);
2307 2403
2308 local_irq_save(flags); 2404 local_irq_save(flags);
2309 __get_cpu_var(netdev_rx_stat).total++; 2405 __get_cpu_var(netdev_rx_stat).total++;
2310 2406
2311 spin_lock(&queue->input_pkt_queue.lock); 2407 rps_lock(sd);
2312 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) { 2408 if (sd->input_pkt_queue.qlen <= netdev_max_backlog) {
2313 if (queue->input_pkt_queue.qlen) { 2409 if (sd->input_pkt_queue.qlen) {
2314enqueue: 2410enqueue:
2315 __skb_queue_tail(&queue->input_pkt_queue, skb); 2411 __skb_queue_tail(&sd->input_pkt_queue, skb);
2316 spin_unlock_irqrestore(&queue->input_pkt_queue.lock, 2412#ifdef CONFIG_RPS
2317 flags); 2413 *qtail = sd->input_queue_head + sd->input_pkt_queue.qlen;
2414#endif
2415 rps_unlock(sd);
2416 local_irq_restore(flags);
2318 return NET_RX_SUCCESS; 2417 return NET_RX_SUCCESS;
2319 } 2418 }
2320 2419
2321 /* Schedule NAPI for backlog device */ 2420 /* Schedule NAPI for backlog device */
2322 if (napi_schedule_prep(&queue->backlog)) { 2421 if (napi_schedule_prep(&sd->backlog)) {
2323 if (cpu != smp_processor_id()) { 2422 if (!rps_ipi_queued(sd))
2324 struct rps_remote_softirq_cpus *rcpus = 2423 __napi_schedule(&sd->backlog);
2325 &__get_cpu_var(rps_remote_softirq_cpus);
2326
2327 cpu_set(cpu, rcpus->mask[rcpus->select]);
2328 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2329 } else
2330 __napi_schedule(&queue->backlog);
2331 } 2424 }
2332 goto enqueue; 2425 goto enqueue;
2333 } 2426 }
2334 2427
2335 spin_unlock(&queue->input_pkt_queue.lock); 2428 rps_unlock(sd);
2336 2429
2337 __get_cpu_var(netdev_rx_stat).dropped++; 2430 __get_cpu_var(netdev_rx_stat).dropped++;
2338 local_irq_restore(flags); 2431 local_irq_restore(flags);
@@ -2358,7 +2451,7 @@ enqueue:
2358 2451
2359int netif_rx(struct sk_buff *skb) 2452int netif_rx(struct sk_buff *skb)
2360{ 2453{
2361 int cpu; 2454 int ret;
2362 2455
2363 /* if netpoll wants it, pretend we never saw it */ 2456 /* if netpoll wants it, pretend we never saw it */
2364 if (netpoll_rx(skb)) 2457 if (netpoll_rx(skb))
@@ -2367,11 +2460,29 @@ int netif_rx(struct sk_buff *skb)
2367 if (!skb->tstamp.tv64) 2460 if (!skb->tstamp.tv64)
2368 net_timestamp(skb); 2461 net_timestamp(skb);
2369 2462
2370 cpu = get_rps_cpu(skb->dev, skb); 2463#ifdef CONFIG_RPS
2371 if (cpu < 0) 2464 {
2372 cpu = smp_processor_id(); 2465 struct rps_dev_flow voidflow, *rflow = &voidflow;
2466 int cpu;
2373 2467
2374 return enqueue_to_backlog(skb, cpu); 2468 rcu_read_lock();
2469
2470 cpu = get_rps_cpu(skb->dev, skb, &rflow);
2471 if (cpu < 0)
2472 cpu = smp_processor_id();
2473
2474 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2475
2476 rcu_read_unlock();
2477 }
2478#else
2479 {
2480 unsigned int qtail;
2481 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
2482 put_cpu();
2483 }
2484#endif
2485 return ret;
2375} 2486}
2376EXPORT_SYMBOL(netif_rx); 2487EXPORT_SYMBOL(netif_rx);
2377 2488
@@ -2608,10 +2719,60 @@ void netif_nit_deliver(struct sk_buff *skb)
2608 rcu_read_unlock(); 2719 rcu_read_unlock();
2609} 2720}
2610 2721
2611int __netif_receive_skb(struct sk_buff *skb) 2722static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
2723 struct net_device *master)
2724{
2725 if (skb->pkt_type == PACKET_HOST) {
2726 u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
2727
2728 memcpy(dest, master->dev_addr, ETH_ALEN);
2729 }
2730}
2731
2732/* On bonding slaves other than the currently active slave, suppress
2733 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
2734 * ARP on active-backup slaves with arp_validate enabled.
2735 */
2736int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
2737{
2738 struct net_device *dev = skb->dev;
2739
2740 if (master->priv_flags & IFF_MASTER_ARPMON)
2741 dev->last_rx = jiffies;
2742
2743 if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
2744 /* Do address unmangle. The local destination address
2745 * will be always the one master has. Provides the right
2746 * functionality in a bridge.
2747 */
2748 skb_bond_set_mac_by_master(skb, master);
2749 }
2750
2751 if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
2752 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
2753 skb->protocol == __cpu_to_be16(ETH_P_ARP))
2754 return 0;
2755
2756 if (master->priv_flags & IFF_MASTER_ALB) {
2757 if (skb->pkt_type != PACKET_BROADCAST &&
2758 skb->pkt_type != PACKET_MULTICAST)
2759 return 0;
2760 }
2761 if (master->priv_flags & IFF_MASTER_8023AD &&
2762 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
2763 return 0;
2764
2765 return 1;
2766 }
2767 return 0;
2768}
2769EXPORT_SYMBOL(__skb_bond_should_drop);
2770
2771static int __netif_receive_skb(struct sk_buff *skb)
2612{ 2772{
2613 struct packet_type *ptype, *pt_prev; 2773 struct packet_type *ptype, *pt_prev;
2614 struct net_device *orig_dev; 2774 struct net_device *orig_dev;
2775 struct net_device *master;
2615 struct net_device *null_or_orig; 2776 struct net_device *null_or_orig;
2616 struct net_device *null_or_bond; 2777 struct net_device *null_or_bond;
2617 int ret = NET_RX_DROP; 2778 int ret = NET_RX_DROP;
@@ -2632,11 +2793,12 @@ int __netif_receive_skb(struct sk_buff *skb)
2632 2793
2633 null_or_orig = NULL; 2794 null_or_orig = NULL;
2634 orig_dev = skb->dev; 2795 orig_dev = skb->dev;
2635 if (orig_dev->master) { 2796 master = ACCESS_ONCE(orig_dev->master);
2636 if (skb_bond_should_drop(skb)) 2797 if (master) {
2798 if (skb_bond_should_drop(skb, master))
2637 null_or_orig = orig_dev; /* deliver only exact match */ 2799 null_or_orig = orig_dev; /* deliver only exact match */
2638 else 2800 else
2639 skb->dev = orig_dev->master; 2801 skb->dev = master;
2640 } 2802 }
2641 2803
2642 __get_cpu_var(netdev_rx_stat).total++; 2804 __get_cpu_var(netdev_rx_stat).total++;
@@ -2735,29 +2897,46 @@ out:
2735 */ 2897 */
2736int netif_receive_skb(struct sk_buff *skb) 2898int netif_receive_skb(struct sk_buff *skb)
2737{ 2899{
2738 int cpu; 2900#ifdef CONFIG_RPS
2901 struct rps_dev_flow voidflow, *rflow = &voidflow;
2902 int cpu, ret;
2903
2904 rcu_read_lock();
2739 2905
2740 cpu = get_rps_cpu(skb->dev, skb); 2906 cpu = get_rps_cpu(skb->dev, skb, &rflow);
2741 2907
2742 if (cpu < 0) 2908 if (cpu >= 0) {
2743 return __netif_receive_skb(skb); 2909 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2744 else 2910 rcu_read_unlock();
2745 return enqueue_to_backlog(skb, cpu); 2911 } else {
2912 rcu_read_unlock();
2913 ret = __netif_receive_skb(skb);
2914 }
2915
2916 return ret;
2917#else
2918 return __netif_receive_skb(skb);
2919#endif
2746} 2920}
2747EXPORT_SYMBOL(netif_receive_skb); 2921EXPORT_SYMBOL(netif_receive_skb);
2748 2922
2749/* Network device is going away, flush any packets still pending */ 2923/* Network device is going away, flush any packets still pending
2924 * Called with irqs disabled.
2925 */
2750static void flush_backlog(void *arg) 2926static void flush_backlog(void *arg)
2751{ 2927{
2752 struct net_device *dev = arg; 2928 struct net_device *dev = arg;
2753 struct softnet_data *queue = &__get_cpu_var(softnet_data); 2929 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2754 struct sk_buff *skb, *tmp; 2930 struct sk_buff *skb, *tmp;
2755 2931
2756 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp) 2932 rps_lock(sd);
2933 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp)
2757 if (skb->dev == dev) { 2934 if (skb->dev == dev) {
2758 __skb_unlink(skb, &queue->input_pkt_queue); 2935 __skb_unlink(skb, &sd->input_pkt_queue);
2759 kfree_skb(skb); 2936 kfree_skb(skb);
2937 input_queue_head_incr(sd);
2760 } 2938 }
2939 rps_unlock(sd);
2761} 2940}
2762 2941
2763static int napi_gro_complete(struct sk_buff *skb) 2942static int napi_gro_complete(struct sk_buff *skb)
@@ -3063,24 +3242,27 @@ EXPORT_SYMBOL(napi_gro_frags);
3063static int process_backlog(struct napi_struct *napi, int quota) 3242static int process_backlog(struct napi_struct *napi, int quota)
3064{ 3243{
3065 int work = 0; 3244 int work = 0;
3066 struct softnet_data *queue = &__get_cpu_var(softnet_data); 3245 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3067 unsigned long start_time = jiffies;
3068 3246
3069 napi->weight = weight_p; 3247 napi->weight = weight_p;
3070 do { 3248 do {
3071 struct sk_buff *skb; 3249 struct sk_buff *skb;
3072 3250
3073 spin_lock_irq(&queue->input_pkt_queue.lock); 3251 local_irq_disable();
3074 skb = __skb_dequeue(&queue->input_pkt_queue); 3252 rps_lock(sd);
3253 skb = __skb_dequeue(&sd->input_pkt_queue);
3075 if (!skb) { 3254 if (!skb) {
3076 __napi_complete(napi); 3255 __napi_complete(napi);
3077 spin_unlock_irq(&queue->input_pkt_queue.lock); 3256 rps_unlock(sd);
3257 local_irq_enable();
3078 break; 3258 break;
3079 } 3259 }
3080 spin_unlock_irq(&queue->input_pkt_queue.lock); 3260 input_queue_head_incr(sd);
3261 rps_unlock(sd);
3262 local_irq_enable();
3081 3263
3082 __netif_receive_skb(skb); 3264 __netif_receive_skb(skb);
3083 } while (++work < quota && jiffies == start_time); 3265 } while (++work < quota);
3084 3266
3085 return work; 3267 return work;
3086} 3268}
@@ -3169,20 +3351,32 @@ void netif_napi_del(struct napi_struct *napi)
3169EXPORT_SYMBOL(netif_napi_del); 3351EXPORT_SYMBOL(netif_napi_del);
3170 3352
3171/* 3353/*
3172 * net_rps_action sends any pending IPI's for rps. This is only called from 3354 * net_rps_action sends any pending IPI's for rps.
3173 * softirq and interrupts must be enabled. 3355 * Note: called with local irq disabled, but exits with local irq enabled.
3174 */ 3356 */
3175static void net_rps_action(cpumask_t *mask) 3357static void net_rps_action_and_irq_disable(void)
3176{ 3358{
3177 int cpu; 3359#ifdef CONFIG_RPS
3360 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3361 struct softnet_data *remsd = sd->rps_ipi_list;
3178 3362
3179 /* Send pending IPI's to kick RPS processing on remote cpus. */ 3363 if (remsd) {
3180 for_each_cpu_mask_nr(cpu, *mask) { 3364 sd->rps_ipi_list = NULL;
3181 struct softnet_data *queue = &per_cpu(softnet_data, cpu); 3365
3182 if (cpu_online(cpu)) 3366 local_irq_enable();
3183 __smp_call_function_single(cpu, &queue->csd, 0); 3367
3184 } 3368 /* Send pending IPI's to kick RPS processing on remote cpus. */
3185 cpus_clear(*mask); 3369 while (remsd) {
3370 struct softnet_data *next = remsd->rps_ipi_next;
3371
3372 if (cpu_online(remsd->cpu))
3373 __smp_call_function_single(remsd->cpu,
3374 &remsd->csd, 0);
3375 remsd = next;
3376 }
3377 } else
3378#endif
3379 local_irq_enable();
3186} 3380}
3187 3381
3188static void net_rx_action(struct softirq_action *h) 3382static void net_rx_action(struct softirq_action *h)
@@ -3191,8 +3385,6 @@ static void net_rx_action(struct softirq_action *h)
3191 unsigned long time_limit = jiffies + 2; 3385 unsigned long time_limit = jiffies + 2;
3192 int budget = netdev_budget; 3386 int budget = netdev_budget;
3193 void *have; 3387 void *have;
3194 int select;
3195 struct rps_remote_softirq_cpus *rcpus;
3196 3388
3197 local_irq_disable(); 3389 local_irq_disable();
3198 3390
@@ -3255,13 +3447,7 @@ static void net_rx_action(struct softirq_action *h)
3255 netpoll_poll_unlock(have); 3447 netpoll_poll_unlock(have);
3256 } 3448 }
3257out: 3449out:
3258 rcpus = &__get_cpu_var(rps_remote_softirq_cpus); 3450 net_rps_action_and_irq_disable();
3259 select = rcpus->select;
3260 rcpus->select ^= 1;
3261
3262 local_irq_enable();
3263
3264 net_rps_action(&rcpus->mask[select]);
3265 3451
3266#ifdef CONFIG_NET_DMA 3452#ifdef CONFIG_NET_DMA
3267 /* 3453 /*
@@ -3733,11 +3919,10 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
3733 3919
3734 slave->master = master; 3920 slave->master = master;
3735 3921
3736 synchronize_net(); 3922 if (old) {
3737 3923 synchronize_net();
3738 if (old)
3739 dev_put(old); 3924 dev_put(old);
3740 3925 }
3741 if (master) 3926 if (master)
3742 slave->flags |= IFF_SLAVE; 3927 slave->flags |= IFF_SLAVE;
3743 else 3928 else
@@ -3914,562 +4099,6 @@ void dev_set_rx_mode(struct net_device *dev)
3914 netif_addr_unlock_bh(dev); 4099 netif_addr_unlock_bh(dev);
3915} 4100}
3916 4101
3917/* hw addresses list handling functions */
3918
3919static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3920 int addr_len, unsigned char addr_type)
3921{
3922 struct netdev_hw_addr *ha;
3923 int alloc_size;
3924
3925 if (addr_len > MAX_ADDR_LEN)
3926 return -EINVAL;
3927
3928 list_for_each_entry(ha, &list->list, list) {
3929 if (!memcmp(ha->addr, addr, addr_len) &&
3930 ha->type == addr_type) {
3931 ha->refcount++;
3932 return 0;
3933 }
3934 }
3935
3936
3937 alloc_size = sizeof(*ha);
3938 if (alloc_size < L1_CACHE_BYTES)
3939 alloc_size = L1_CACHE_BYTES;
3940 ha = kmalloc(alloc_size, GFP_ATOMIC);
3941 if (!ha)
3942 return -ENOMEM;
3943 memcpy(ha->addr, addr, addr_len);
3944 ha->type = addr_type;
3945 ha->refcount = 1;
3946 ha->synced = false;
3947 list_add_tail_rcu(&ha->list, &list->list);
3948 list->count++;
3949 return 0;
3950}
3951
3952static void ha_rcu_free(struct rcu_head *head)
3953{
3954 struct netdev_hw_addr *ha;
3955
3956 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3957 kfree(ha);
3958}
3959
3960static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
3961 int addr_len, unsigned char addr_type)
3962{
3963 struct netdev_hw_addr *ha;
3964
3965 list_for_each_entry(ha, &list->list, list) {
3966 if (!memcmp(ha->addr, addr, addr_len) &&
3967 (ha->type == addr_type || !addr_type)) {
3968 if (--ha->refcount)
3969 return 0;
3970 list_del_rcu(&ha->list);
3971 call_rcu(&ha->rcu_head, ha_rcu_free);
3972 list->count--;
3973 return 0;
3974 }
3975 }
3976 return -ENOENT;
3977}
3978
3979static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
3980 struct netdev_hw_addr_list *from_list,
3981 int addr_len,
3982 unsigned char addr_type)
3983{
3984 int err;
3985 struct netdev_hw_addr *ha, *ha2;
3986 unsigned char type;
3987
3988 list_for_each_entry(ha, &from_list->list, list) {
3989 type = addr_type ? addr_type : ha->type;
3990 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
3991 if (err)
3992 goto unroll;
3993 }
3994 return 0;
3995
3996unroll:
3997 list_for_each_entry(ha2, &from_list->list, list) {
3998 if (ha2 == ha)
3999 break;
4000 type = addr_type ? addr_type : ha2->type;
4001 __hw_addr_del(to_list, ha2->addr, addr_len, type);
4002 }
4003 return err;
4004}
4005
4006static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
4007 struct netdev_hw_addr_list *from_list,
4008 int addr_len,
4009 unsigned char addr_type)
4010{
4011 struct netdev_hw_addr *ha;
4012 unsigned char type;
4013
4014 list_for_each_entry(ha, &from_list->list, list) {
4015 type = addr_type ? addr_type : ha->type;
4016 __hw_addr_del(to_list, ha->addr, addr_len, addr_type);
4017 }
4018}
4019
4020static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
4021 struct netdev_hw_addr_list *from_list,
4022 int addr_len)
4023{
4024 int err = 0;
4025 struct netdev_hw_addr *ha, *tmp;
4026
4027 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
4028 if (!ha->synced) {
4029 err = __hw_addr_add(to_list, ha->addr,
4030 addr_len, ha->type);
4031 if (err)
4032 break;
4033 ha->synced = true;
4034 ha->refcount++;
4035 } else if (ha->refcount == 1) {
4036 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
4037 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
4038 }
4039 }
4040 return err;
4041}
4042
4043static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
4044 struct netdev_hw_addr_list *from_list,
4045 int addr_len)
4046{
4047 struct netdev_hw_addr *ha, *tmp;
4048
4049 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
4050 if (ha->synced) {
4051 __hw_addr_del(to_list, ha->addr,
4052 addr_len, ha->type);
4053 ha->synced = false;
4054 __hw_addr_del(from_list, ha->addr,
4055 addr_len, ha->type);
4056 }
4057 }
4058}
4059
4060static void __hw_addr_flush(struct netdev_hw_addr_list *list)
4061{
4062 struct netdev_hw_addr *ha, *tmp;
4063
4064 list_for_each_entry_safe(ha, tmp, &list->list, list) {
4065 list_del_rcu(&ha->list);
4066 call_rcu(&ha->rcu_head, ha_rcu_free);
4067 }
4068 list->count = 0;
4069}
4070
4071static void __hw_addr_init(struct netdev_hw_addr_list *list)
4072{
4073 INIT_LIST_HEAD(&list->list);
4074 list->count = 0;
4075}
4076
4077/* Device addresses handling functions */
4078
4079static void dev_addr_flush(struct net_device *dev)
4080{
4081 /* rtnl_mutex must be held here */
4082
4083 __hw_addr_flush(&dev->dev_addrs);
4084 dev->dev_addr = NULL;
4085}
4086
4087static int dev_addr_init(struct net_device *dev)
4088{
4089 unsigned char addr[MAX_ADDR_LEN];
4090 struct netdev_hw_addr *ha;
4091 int err;
4092
4093 /* rtnl_mutex must be held here */
4094
4095 __hw_addr_init(&dev->dev_addrs);
4096 memset(addr, 0, sizeof(addr));
4097 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
4098 NETDEV_HW_ADDR_T_LAN);
4099 if (!err) {
4100 /*
4101 * Get the first (previously created) address from the list
4102 * and set dev_addr pointer to this location.
4103 */
4104 ha = list_first_entry(&dev->dev_addrs.list,
4105 struct netdev_hw_addr, list);
4106 dev->dev_addr = ha->addr;
4107 }
4108 return err;
4109}
4110
4111/**
4112 * dev_addr_add - Add a device address
4113 * @dev: device
4114 * @addr: address to add
4115 * @addr_type: address type
4116 *
4117 * Add a device address to the device or increase the reference count if
4118 * it already exists.
4119 *
4120 * The caller must hold the rtnl_mutex.
4121 */
4122int dev_addr_add(struct net_device *dev, unsigned char *addr,
4123 unsigned char addr_type)
4124{
4125 int err;
4126
4127 ASSERT_RTNL();
4128
4129 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
4130 if (!err)
4131 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4132 return err;
4133}
4134EXPORT_SYMBOL(dev_addr_add);
4135
4136/**
4137 * dev_addr_del - Release a device address.
4138 * @dev: device
4139 * @addr: address to delete
4140 * @addr_type: address type
4141 *
4142 * Release reference to a device address and remove it from the device
4143 * if the reference count drops to zero.
4144 *
4145 * The caller must hold the rtnl_mutex.
4146 */
4147int dev_addr_del(struct net_device *dev, unsigned char *addr,
4148 unsigned char addr_type)
4149{
4150 int err;
4151 struct netdev_hw_addr *ha;
4152
4153 ASSERT_RTNL();
4154
4155 /*
4156 * We can not remove the first address from the list because
4157 * dev->dev_addr points to that.
4158 */
4159 ha = list_first_entry(&dev->dev_addrs.list,
4160 struct netdev_hw_addr, list);
4161 if (ha->addr == dev->dev_addr && ha->refcount == 1)
4162 return -ENOENT;
4163
4164 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
4165 addr_type);
4166 if (!err)
4167 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4168 return err;
4169}
4170EXPORT_SYMBOL(dev_addr_del);
4171
4172/**
4173 * dev_addr_add_multiple - Add device addresses from another device
4174 * @to_dev: device to which addresses will be added
4175 * @from_dev: device from which addresses will be added
4176 * @addr_type: address type - 0 means type will be used from from_dev
4177 *
4178 * Add device addresses of the one device to another.
4179 **
4180 * The caller must hold the rtnl_mutex.
4181 */
4182int dev_addr_add_multiple(struct net_device *to_dev,
4183 struct net_device *from_dev,
4184 unsigned char addr_type)
4185{
4186 int err;
4187
4188 ASSERT_RTNL();
4189
4190 if (from_dev->addr_len != to_dev->addr_len)
4191 return -EINVAL;
4192 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
4193 to_dev->addr_len, addr_type);
4194 if (!err)
4195 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
4196 return err;
4197}
4198EXPORT_SYMBOL(dev_addr_add_multiple);
4199
4200/**
4201 * dev_addr_del_multiple - Delete device addresses by another device
4202 * @to_dev: device where the addresses will be deleted
4203 * @from_dev: device by which addresses the addresses will be deleted
4204 * @addr_type: address type - 0 means type will used from from_dev
4205 *
4206 * Deletes addresses in to device by the list of addresses in from device.
4207 *
4208 * The caller must hold the rtnl_mutex.
4209 */
4210int dev_addr_del_multiple(struct net_device *to_dev,
4211 struct net_device *from_dev,
4212 unsigned char addr_type)
4213{
4214 ASSERT_RTNL();
4215
4216 if (from_dev->addr_len != to_dev->addr_len)
4217 return -EINVAL;
4218 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
4219 to_dev->addr_len, addr_type);
4220 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
4221 return 0;
4222}
4223EXPORT_SYMBOL(dev_addr_del_multiple);
4224
4225/* multicast addresses handling functions */
4226
4227int __dev_addr_delete(struct dev_addr_list **list, int *count,
4228 void *addr, int alen, int glbl)
4229{
4230 struct dev_addr_list *da;
4231
4232 for (; (da = *list) != NULL; list = &da->next) {
4233 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
4234 alen == da->da_addrlen) {
4235 if (glbl) {
4236 int old_glbl = da->da_gusers;
4237 da->da_gusers = 0;
4238 if (old_glbl == 0)
4239 break;
4240 }
4241 if (--da->da_users)
4242 return 0;
4243
4244 *list = da->next;
4245 kfree(da);
4246 (*count)--;
4247 return 0;
4248 }
4249 }
4250 return -ENOENT;
4251}
4252
4253int __dev_addr_add(struct dev_addr_list **list, int *count,
4254 void *addr, int alen, int glbl)
4255{
4256 struct dev_addr_list *da;
4257
4258 for (da = *list; da != NULL; da = da->next) {
4259 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
4260 da->da_addrlen == alen) {
4261 if (glbl) {
4262 int old_glbl = da->da_gusers;
4263 da->da_gusers = 1;
4264 if (old_glbl)
4265 return 0;
4266 }
4267 da->da_users++;
4268 return 0;
4269 }
4270 }
4271
4272 da = kzalloc(sizeof(*da), GFP_ATOMIC);
4273 if (da == NULL)
4274 return -ENOMEM;
4275 memcpy(da->da_addr, addr, alen);
4276 da->da_addrlen = alen;
4277 da->da_users = 1;
4278 da->da_gusers = glbl ? 1 : 0;
4279 da->next = *list;
4280 *list = da;
4281 (*count)++;
4282 return 0;
4283}
4284
4285/**
4286 * dev_unicast_delete - Release secondary unicast address.
4287 * @dev: device
4288 * @addr: address to delete
4289 *
4290 * Release reference to a secondary unicast address and remove it
4291 * from the device if the reference count drops to zero.
4292 *
4293 * The caller must hold the rtnl_mutex.
4294 */
4295int dev_unicast_delete(struct net_device *dev, void *addr)
4296{
4297 int err;
4298
4299 ASSERT_RTNL();
4300
4301 netif_addr_lock_bh(dev);
4302 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
4303 NETDEV_HW_ADDR_T_UNICAST);
4304 if (!err)
4305 __dev_set_rx_mode(dev);
4306 netif_addr_unlock_bh(dev);
4307 return err;
4308}
4309EXPORT_SYMBOL(dev_unicast_delete);
4310
4311/**
4312 * dev_unicast_add - add a secondary unicast address
4313 * @dev: device
4314 * @addr: address to add
4315 *
4316 * Add a secondary unicast address to the device or increase
4317 * the reference count if it already exists.
4318 *
4319 * The caller must hold the rtnl_mutex.
4320 */
4321int dev_unicast_add(struct net_device *dev, void *addr)
4322{
4323 int err;
4324
4325 ASSERT_RTNL();
4326
4327 netif_addr_lock_bh(dev);
4328 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
4329 NETDEV_HW_ADDR_T_UNICAST);
4330 if (!err)
4331 __dev_set_rx_mode(dev);
4332 netif_addr_unlock_bh(dev);
4333 return err;
4334}
4335EXPORT_SYMBOL(dev_unicast_add);
4336
4337int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
4338 struct dev_addr_list **from, int *from_count)
4339{
4340 struct dev_addr_list *da, *next;
4341 int err = 0;
4342
4343 da = *from;
4344 while (da != NULL) {
4345 next = da->next;
4346 if (!da->da_synced) {
4347 err = __dev_addr_add(to, to_count,
4348 da->da_addr, da->da_addrlen, 0);
4349 if (err < 0)
4350 break;
4351 da->da_synced = 1;
4352 da->da_users++;
4353 } else if (da->da_users == 1) {
4354 __dev_addr_delete(to, to_count,
4355 da->da_addr, da->da_addrlen, 0);
4356 __dev_addr_delete(from, from_count,
4357 da->da_addr, da->da_addrlen, 0);
4358 }
4359 da = next;
4360 }
4361 return err;
4362}
4363EXPORT_SYMBOL_GPL(__dev_addr_sync);
4364
4365void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
4366 struct dev_addr_list **from, int *from_count)
4367{
4368 struct dev_addr_list *da, *next;
4369
4370 da = *from;
4371 while (da != NULL) {
4372 next = da->next;
4373 if (da->da_synced) {
4374 __dev_addr_delete(to, to_count,
4375 da->da_addr, da->da_addrlen, 0);
4376 da->da_synced = 0;
4377 __dev_addr_delete(from, from_count,
4378 da->da_addr, da->da_addrlen, 0);
4379 }
4380 da = next;
4381 }
4382}
4383EXPORT_SYMBOL_GPL(__dev_addr_unsync);
4384
4385/**
4386 * dev_unicast_sync - Synchronize device's unicast list to another device
4387 * @to: destination device
4388 * @from: source device
4389 *
4390 * Add newly added addresses to the destination device and release
4391 * addresses that have no users left. The source device must be
4392 * locked by netif_tx_lock_bh.
4393 *
4394 * This function is intended to be called from the dev->set_rx_mode
4395 * function of layered software devices.
4396 */
4397int dev_unicast_sync(struct net_device *to, struct net_device *from)
4398{
4399 int err = 0;
4400
4401 if (to->addr_len != from->addr_len)
4402 return -EINVAL;
4403
4404 netif_addr_lock_bh(to);
4405 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
4406 if (!err)
4407 __dev_set_rx_mode(to);
4408 netif_addr_unlock_bh(to);
4409 return err;
4410}
4411EXPORT_SYMBOL(dev_unicast_sync);
4412
4413/**
4414 * dev_unicast_unsync - Remove synchronized addresses from the destination device
4415 * @to: destination device
4416 * @from: source device
4417 *
4418 * Remove all addresses that were added to the destination device by
4419 * dev_unicast_sync(). This function is intended to be called from the
4420 * dev->stop function of layered software devices.
4421 */
4422void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4423{
4424 if (to->addr_len != from->addr_len)
4425 return;
4426
4427 netif_addr_lock_bh(from);
4428 netif_addr_lock(to);
4429 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
4430 __dev_set_rx_mode(to);
4431 netif_addr_unlock(to);
4432 netif_addr_unlock_bh(from);
4433}
4434EXPORT_SYMBOL(dev_unicast_unsync);
4435
4436static void dev_unicast_flush(struct net_device *dev)
4437{
4438 netif_addr_lock_bh(dev);
4439 __hw_addr_flush(&dev->uc);
4440 netif_addr_unlock_bh(dev);
4441}
4442
4443static void dev_unicast_init(struct net_device *dev)
4444{
4445 __hw_addr_init(&dev->uc);
4446}
4447
4448
4449static void __dev_addr_discard(struct dev_addr_list **list)
4450{
4451 struct dev_addr_list *tmp;
4452
4453 while (*list != NULL) {
4454 tmp = *list;
4455 *list = tmp->next;
4456 if (tmp->da_users > tmp->da_gusers)
4457 printk("__dev_addr_discard: address leakage! "
4458 "da_users=%d\n", tmp->da_users);
4459 kfree(tmp);
4460 }
4461}
4462
4463static void dev_addr_discard(struct net_device *dev)
4464{
4465 netif_addr_lock_bh(dev);
4466
4467 __dev_addr_discard(&dev->mc_list);
4468 netdev_mc_count(dev) = 0;
4469
4470 netif_addr_unlock_bh(dev);
4471}
4472
4473/** 4102/**
4474 * dev_get_flags - get flags reported to userspace 4103 * dev_get_flags - get flags reported to userspace
4475 * @dev: device 4104 * @dev: device
@@ -4780,8 +4409,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4780 return -EINVAL; 4409 return -EINVAL;
4781 if (!netif_device_present(dev)) 4410 if (!netif_device_present(dev))
4782 return -ENODEV; 4411 return -ENODEV;
4783 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data, 4412 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
4784 dev->addr_len, 1);
4785 4413
4786 case SIOCDELMULTI: 4414 case SIOCDELMULTI:
4787 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || 4415 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
@@ -4789,8 +4417,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4789 return -EINVAL; 4417 return -EINVAL;
4790 if (!netif_device_present(dev)) 4418 if (!netif_device_present(dev))
4791 return -ENODEV; 4419 return -ENODEV;
4792 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data, 4420 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
4793 dev->addr_len, 1);
4794 4421
4795 case SIOCSIFTXQLEN: 4422 case SIOCSIFTXQLEN:
4796 if (ifr->ifr_qlen < 0) 4423 if (ifr->ifr_qlen < 0)
@@ -5097,8 +4724,8 @@ static void rollback_registered_many(struct list_head *head)
5097 /* 4724 /*
5098 * Flush the unicast and multicast chains 4725 * Flush the unicast and multicast chains
5099 */ 4726 */
5100 dev_unicast_flush(dev); 4727 dev_uc_flush(dev);
5101 dev_addr_discard(dev); 4728 dev_mc_flush(dev);
5102 4729
5103 if (dev->netdev_ops->ndo_uninit) 4730 if (dev->netdev_ops->ndo_uninit)
5104 dev->netdev_ops->ndo_uninit(dev); 4731 dev->netdev_ops->ndo_uninit(dev);
@@ -5247,6 +4874,7 @@ int register_netdevice(struct net_device *dev)
5247 4874
5248 dev->iflink = -1; 4875 dev->iflink = -1;
5249 4876
4877#ifdef CONFIG_RPS
5250 if (!dev->num_rx_queues) { 4878 if (!dev->num_rx_queues) {
5251 /* 4879 /*
5252 * Allocate a single RX queue if driver never called 4880 * Allocate a single RX queue if driver never called
@@ -5263,7 +4891,7 @@ int register_netdevice(struct net_device *dev)
5263 atomic_set(&dev->_rx->count, 1); 4891 atomic_set(&dev->_rx->count, 1);
5264 dev->num_rx_queues = 1; 4892 dev->num_rx_queues = 1;
5265 } 4893 }
5266 4894#endif
5267 /* Init, if this function is available */ 4895 /* Init, if this function is available */
5268 if (dev->netdev_ops->ndo_init) { 4896 if (dev->netdev_ops->ndo_init) {
5269 ret = dev->netdev_ops->ndo_init(dev); 4897 ret = dev->netdev_ops->ndo_init(dev);
@@ -5621,11 +5249,13 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5621 void (*setup)(struct net_device *), unsigned int queue_count) 5249 void (*setup)(struct net_device *), unsigned int queue_count)
5622{ 5250{
5623 struct netdev_queue *tx; 5251 struct netdev_queue *tx;
5624 struct netdev_rx_queue *rx;
5625 struct net_device *dev; 5252 struct net_device *dev;
5626 size_t alloc_size; 5253 size_t alloc_size;
5627 struct net_device *p; 5254 struct net_device *p;
5255#ifdef CONFIG_RPS
5256 struct netdev_rx_queue *rx;
5628 int i; 5257 int i;
5258#endif
5629 5259
5630 BUG_ON(strlen(name) >= sizeof(dev->name)); 5260 BUG_ON(strlen(name) >= sizeof(dev->name));
5631 5261
@@ -5651,6 +5281,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5651 goto free_p; 5281 goto free_p;
5652 } 5282 }
5653 5283
5284#ifdef CONFIG_RPS
5654 rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL); 5285 rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5655 if (!rx) { 5286 if (!rx) {
5656 printk(KERN_ERR "alloc_netdev: Unable to allocate " 5287 printk(KERN_ERR "alloc_netdev: Unable to allocate "
@@ -5666,6 +5297,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5666 */ 5297 */
5667 for (i = 0; i < queue_count; i++) 5298 for (i = 0; i < queue_count; i++)
5668 rx[i].first = rx; 5299 rx[i].first = rx;
5300#endif
5669 5301
5670 dev = PTR_ALIGN(p, NETDEV_ALIGN); 5302 dev = PTR_ALIGN(p, NETDEV_ALIGN);
5671 dev->padded = (char *)dev - (char *)p; 5303 dev->padded = (char *)dev - (char *)p;
@@ -5673,7 +5305,8 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5673 if (dev_addr_init(dev)) 5305 if (dev_addr_init(dev))
5674 goto free_rx; 5306 goto free_rx;
5675 5307
5676 dev_unicast_init(dev); 5308 dev_mc_init(dev);
5309 dev_uc_init(dev);
5677 5310
5678 dev_net_set(dev, &init_net); 5311 dev_net_set(dev, &init_net);
5679 5312
@@ -5681,8 +5314,10 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5681 dev->num_tx_queues = queue_count; 5314 dev->num_tx_queues = queue_count;
5682 dev->real_num_tx_queues = queue_count; 5315 dev->real_num_tx_queues = queue_count;
5683 5316
5317#ifdef CONFIG_RPS
5684 dev->_rx = rx; 5318 dev->_rx = rx;
5685 dev->num_rx_queues = queue_count; 5319 dev->num_rx_queues = queue_count;
5320#endif
5686 5321
5687 dev->gso_max_size = GSO_MAX_SIZE; 5322 dev->gso_max_size = GSO_MAX_SIZE;
5688 5323
@@ -5699,8 +5334,10 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5699 return dev; 5334 return dev;
5700 5335
5701free_rx: 5336free_rx:
5337#ifdef CONFIG_RPS
5702 kfree(rx); 5338 kfree(rx);
5703free_tx: 5339free_tx:
5340#endif
5704 kfree(tx); 5341 kfree(tx);
5705free_p: 5342free_p:
5706 kfree(p); 5343 kfree(p);
@@ -5903,8 +5540,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
5903 /* 5540 /*
5904 * Flush the unicast and multicast chains 5541 * Flush the unicast and multicast chains
5905 */ 5542 */
5906 dev_unicast_flush(dev); 5543 dev_uc_flush(dev);
5907 dev_addr_discard(dev); 5544 dev_mc_flush(dev);
5908 5545
5909 netdev_unregister_kobject(dev); 5546 netdev_unregister_kobject(dev);
5910 5547
@@ -5980,8 +5617,10 @@ static int dev_cpu_callback(struct notifier_block *nfb,
5980 local_irq_enable(); 5617 local_irq_enable();
5981 5618
5982 /* Process offline CPU's input_pkt_queue */ 5619 /* Process offline CPU's input_pkt_queue */
5983 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) 5620 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
5984 netif_rx(skb); 5621 netif_rx(skb);
5622 input_queue_head_incr(oldsd);
5623 }
5985 5624
5986 return NOTIFY_OK; 5625 return NOTIFY_OK;
5987} 5626}
@@ -6197,21 +5836,23 @@ static int __init net_dev_init(void)
6197 */ 5836 */
6198 5837
6199 for_each_possible_cpu(i) { 5838 for_each_possible_cpu(i) {
6200 struct softnet_data *queue; 5839 struct softnet_data *sd = &per_cpu(softnet_data, i);
6201 5840
6202 queue = &per_cpu(softnet_data, i); 5841 skb_queue_head_init(&sd->input_pkt_queue);
6203 skb_queue_head_init(&queue->input_pkt_queue); 5842 sd->completion_queue = NULL;
6204 queue->completion_queue = NULL; 5843 INIT_LIST_HEAD(&sd->poll_list);
6205 INIT_LIST_HEAD(&queue->poll_list);
6206 5844
6207 queue->csd.func = trigger_softirq; 5845#ifdef CONFIG_RPS
6208 queue->csd.info = queue; 5846 sd->csd.func = rps_trigger_softirq;
6209 queue->csd.flags = 0; 5847 sd->csd.info = sd;
5848 sd->csd.flags = 0;
5849 sd->cpu = i;
5850#endif
6210 5851
6211 queue->backlog.poll = process_backlog; 5852 sd->backlog.poll = process_backlog;
6212 queue->backlog.weight = weight_p; 5853 sd->backlog.weight = weight_p;
6213 queue->backlog.gro_list = NULL; 5854 sd->backlog.gro_list = NULL;
6214 queue->backlog.gro_count = 0; 5855 sd->backlog.gro_count = 0;
6215 } 5856 }
6216 5857
6217 dev_boot_phase = 0; 5858 dev_boot_phase = 0;
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
new file mode 100644
index 000000000000..508f9c18992f
--- /dev/null
+++ b/net/core/dev_addr_lists.c
@@ -0,0 +1,741 @@
1/*
2 * net/core/dev_addr_lists.c - Functions for handling net device lists
3 * Copyright (c) 2010 Jiri Pirko <jpirko@redhat.com>
4 *
5 * This file contains functions for working with unicast, multicast and device
6 * addresses lists.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/netdevice.h>
15#include <linux/rtnetlink.h>
16#include <linux/list.h>
17#include <linux/proc_fs.h>
18
19/*
20 * General list handling functions
21 */
22
23static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
24 unsigned char *addr, int addr_len,
25 unsigned char addr_type, bool global)
26{
27 struct netdev_hw_addr *ha;
28 int alloc_size;
29
30 if (addr_len > MAX_ADDR_LEN)
31 return -EINVAL;
32
33 list_for_each_entry(ha, &list->list, list) {
34 if (!memcmp(ha->addr, addr, addr_len) &&
35 ha->type == addr_type) {
36 if (global) {
37 /* check if addr is already used as global */
38 if (ha->global_use)
39 return 0;
40 else
41 ha->global_use = true;
42 }
43 ha->refcount++;
44 return 0;
45 }
46 }
47
48
49 alloc_size = sizeof(*ha);
50 if (alloc_size < L1_CACHE_BYTES)
51 alloc_size = L1_CACHE_BYTES;
52 ha = kmalloc(alloc_size, GFP_ATOMIC);
53 if (!ha)
54 return -ENOMEM;
55 memcpy(ha->addr, addr, addr_len);
56 ha->type = addr_type;
57 ha->refcount = 1;
58 ha->global_use = global;
59 ha->synced = false;
60 list_add_tail_rcu(&ha->list, &list->list);
61 list->count++;
62 return 0;
63}
64
65static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
66 int addr_len, unsigned char addr_type)
67{
68 return __hw_addr_add_ex(list, addr, addr_len, addr_type, false);
69}
70
71static void ha_rcu_free(struct rcu_head *head)
72{
73 struct netdev_hw_addr *ha;
74
75 ha = container_of(head, struct netdev_hw_addr, rcu_head);
76 kfree(ha);
77}
78
79static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
80 unsigned char *addr, int addr_len,
81 unsigned char addr_type, bool global)
82{
83 struct netdev_hw_addr *ha;
84
85 list_for_each_entry(ha, &list->list, list) {
86 if (!memcmp(ha->addr, addr, addr_len) &&
87 (ha->type == addr_type || !addr_type)) {
88 if (global) {
89 if (!ha->global_use)
90 break;
91 else
92 ha->global_use = false;
93 }
94 if (--ha->refcount)
95 return 0;
96 list_del_rcu(&ha->list);
97 call_rcu(&ha->rcu_head, ha_rcu_free);
98 list->count--;
99 return 0;
100 }
101 }
102 return -ENOENT;
103}
104
105static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
106 int addr_len, unsigned char addr_type)
107{
108 return __hw_addr_del_ex(list, addr, addr_len, addr_type, false);
109}
110
111int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
112 struct netdev_hw_addr_list *from_list,
113 int addr_len, unsigned char addr_type)
114{
115 int err;
116 struct netdev_hw_addr *ha, *ha2;
117 unsigned char type;
118
119 list_for_each_entry(ha, &from_list->list, list) {
120 type = addr_type ? addr_type : ha->type;
121 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
122 if (err)
123 goto unroll;
124 }
125 return 0;
126
127unroll:
128 list_for_each_entry(ha2, &from_list->list, list) {
129 if (ha2 == ha)
130 break;
131 type = addr_type ? addr_type : ha2->type;
132 __hw_addr_del(to_list, ha2->addr, addr_len, type);
133 }
134 return err;
135}
136EXPORT_SYMBOL(__hw_addr_add_multiple);
137
138void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
139 struct netdev_hw_addr_list *from_list,
140 int addr_len, unsigned char addr_type)
141{
142 struct netdev_hw_addr *ha;
143 unsigned char type;
144
145 list_for_each_entry(ha, &from_list->list, list) {
146 type = addr_type ? addr_type : ha->type;
147 __hw_addr_del(to_list, ha->addr, addr_len, addr_type);
148 }
149}
150EXPORT_SYMBOL(__hw_addr_del_multiple);
151
152int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
153 struct netdev_hw_addr_list *from_list,
154 int addr_len)
155{
156 int err = 0;
157 struct netdev_hw_addr *ha, *tmp;
158
159 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
160 if (!ha->synced) {
161 err = __hw_addr_add(to_list, ha->addr,
162 addr_len, ha->type);
163 if (err)
164 break;
165 ha->synced = true;
166 ha->refcount++;
167 } else if (ha->refcount == 1) {
168 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
169 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
170 }
171 }
172 return err;
173}
174EXPORT_SYMBOL(__hw_addr_sync);
175
176void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
177 struct netdev_hw_addr_list *from_list,
178 int addr_len)
179{
180 struct netdev_hw_addr *ha, *tmp;
181
182 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
183 if (ha->synced) {
184 __hw_addr_del(to_list, ha->addr,
185 addr_len, ha->type);
186 ha->synced = false;
187 __hw_addr_del(from_list, ha->addr,
188 addr_len, ha->type);
189 }
190 }
191}
192EXPORT_SYMBOL(__hw_addr_unsync);
193
194void __hw_addr_flush(struct netdev_hw_addr_list *list)
195{
196 struct netdev_hw_addr *ha, *tmp;
197
198 list_for_each_entry_safe(ha, tmp, &list->list, list) {
199 list_del_rcu(&ha->list);
200 call_rcu(&ha->rcu_head, ha_rcu_free);
201 }
202 list->count = 0;
203}
204EXPORT_SYMBOL(__hw_addr_flush);
205
206void __hw_addr_init(struct netdev_hw_addr_list *list)
207{
208 INIT_LIST_HEAD(&list->list);
209 list->count = 0;
210}
211EXPORT_SYMBOL(__hw_addr_init);
212
213/*
214 * Device addresses handling functions
215 */
216
217/**
218 * dev_addr_flush - Flush device address list
219 * @dev: device
220 *
221 * Flush device address list and reset ->dev_addr.
222 *
223 * The caller must hold the rtnl_mutex.
224 */
225void dev_addr_flush(struct net_device *dev)
226{
227 /* rtnl_mutex must be held here */
228
229 __hw_addr_flush(&dev->dev_addrs);
230 dev->dev_addr = NULL;
231}
232EXPORT_SYMBOL(dev_addr_flush);
233
234/**
235 * dev_addr_init - Init device address list
236 * @dev: device
237 *
238 * Init device address list and create the first element,
239 * used by ->dev_addr.
240 *
241 * The caller must hold the rtnl_mutex.
242 */
243int dev_addr_init(struct net_device *dev)
244{
245 unsigned char addr[MAX_ADDR_LEN];
246 struct netdev_hw_addr *ha;
247 int err;
248
249 /* rtnl_mutex must be held here */
250
251 __hw_addr_init(&dev->dev_addrs);
252 memset(addr, 0, sizeof(addr));
253 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
254 NETDEV_HW_ADDR_T_LAN);
255 if (!err) {
256 /*
257 * Get the first (previously created) address from the list
258 * and set dev_addr pointer to this location.
259 */
260 ha = list_first_entry(&dev->dev_addrs.list,
261 struct netdev_hw_addr, list);
262 dev->dev_addr = ha->addr;
263 }
264 return err;
265}
266EXPORT_SYMBOL(dev_addr_init);
267
268/**
269 * dev_addr_add - Add a device address
270 * @dev: device
271 * @addr: address to add
272 * @addr_type: address type
273 *
274 * Add a device address to the device or increase the reference count if
275 * it already exists.
276 *
277 * The caller must hold the rtnl_mutex.
278 */
279int dev_addr_add(struct net_device *dev, unsigned char *addr,
280 unsigned char addr_type)
281{
282 int err;
283
284 ASSERT_RTNL();
285
286 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
287 if (!err)
288 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
289 return err;
290}
291EXPORT_SYMBOL(dev_addr_add);
292
293/**
294 * dev_addr_del - Release a device address.
295 * @dev: device
296 * @addr: address to delete
297 * @addr_type: address type
298 *
299 * Release reference to a device address and remove it from the device
300 * if the reference count drops to zero.
301 *
302 * The caller must hold the rtnl_mutex.
303 */
304int dev_addr_del(struct net_device *dev, unsigned char *addr,
305 unsigned char addr_type)
306{
307 int err;
308 struct netdev_hw_addr *ha;
309
310 ASSERT_RTNL();
311
312 /*
313 * We can not remove the first address from the list because
314 * dev->dev_addr points to that.
315 */
316 ha = list_first_entry(&dev->dev_addrs.list,
317 struct netdev_hw_addr, list);
318 if (ha->addr == dev->dev_addr && ha->refcount == 1)
319 return -ENOENT;
320
321 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
322 addr_type);
323 if (!err)
324 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
325 return err;
326}
327EXPORT_SYMBOL(dev_addr_del);
328
329/**
330 * dev_addr_add_multiple - Add device addresses from another device
331 * @to_dev: device to which addresses will be added
332 * @from_dev: device from which addresses will be added
333 * @addr_type: address type - 0 means type will be used from from_dev
334 *
335 * Add device addresses of the one device to another.
336 **
337 * The caller must hold the rtnl_mutex.
338 */
339int dev_addr_add_multiple(struct net_device *to_dev,
340 struct net_device *from_dev,
341 unsigned char addr_type)
342{
343 int err;
344
345 ASSERT_RTNL();
346
347 if (from_dev->addr_len != to_dev->addr_len)
348 return -EINVAL;
349 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
350 to_dev->addr_len, addr_type);
351 if (!err)
352 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
353 return err;
354}
355EXPORT_SYMBOL(dev_addr_add_multiple);
356
357/**
358 * dev_addr_del_multiple - Delete device addresses by another device
359 * @to_dev: device where the addresses will be deleted
360 * @from_dev: device by which addresses the addresses will be deleted
361 * @addr_type: address type - 0 means type will used from from_dev
362 *
363 * Deletes addresses in to device by the list of addresses in from device.
364 *
365 * The caller must hold the rtnl_mutex.
366 */
367int dev_addr_del_multiple(struct net_device *to_dev,
368 struct net_device *from_dev,
369 unsigned char addr_type)
370{
371 ASSERT_RTNL();
372
373 if (from_dev->addr_len != to_dev->addr_len)
374 return -EINVAL;
375 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
376 to_dev->addr_len, addr_type);
377 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
378 return 0;
379}
380EXPORT_SYMBOL(dev_addr_del_multiple);
381
382/*
383 * Unicast list handling functions
384 */
385
386/**
387 * dev_uc_add - Add a secondary unicast address
388 * @dev: device
389 * @addr: address to add
390 *
391 * Add a secondary unicast address to the device or increase
392 * the reference count if it already exists.
393 */
394int dev_uc_add(struct net_device *dev, unsigned char *addr)
395{
396 int err;
397
398 netif_addr_lock_bh(dev);
399 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
400 NETDEV_HW_ADDR_T_UNICAST);
401 if (!err)
402 __dev_set_rx_mode(dev);
403 netif_addr_unlock_bh(dev);
404 return err;
405}
406EXPORT_SYMBOL(dev_uc_add);
407
408/**
409 * dev_uc_del - Release secondary unicast address.
410 * @dev: device
411 * @addr: address to delete
412 *
413 * Release reference to a secondary unicast address and remove it
414 * from the device if the reference count drops to zero.
415 */
416int dev_uc_del(struct net_device *dev, unsigned char *addr)
417{
418 int err;
419
420 netif_addr_lock_bh(dev);
421 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
422 NETDEV_HW_ADDR_T_UNICAST);
423 if (!err)
424 __dev_set_rx_mode(dev);
425 netif_addr_unlock_bh(dev);
426 return err;
427}
428EXPORT_SYMBOL(dev_uc_del);
429
430/**
431 * dev_uc_sync - Synchronize device's unicast list to another device
432 * @to: destination device
433 * @from: source device
434 *
435 * Add newly added addresses to the destination device and release
436 * addresses that have no users left. The source device must be
437 * locked by netif_tx_lock_bh.
438 *
439 * This function is intended to be called from the dev->set_rx_mode
440 * function of layered software devices.
441 */
442int dev_uc_sync(struct net_device *to, struct net_device *from)
443{
444 int err = 0;
445
446 if (to->addr_len != from->addr_len)
447 return -EINVAL;
448
449 netif_addr_lock_bh(to);
450 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
451 if (!err)
452 __dev_set_rx_mode(to);
453 netif_addr_unlock_bh(to);
454 return err;
455}
456EXPORT_SYMBOL(dev_uc_sync);
457
458/**
459 * dev_uc_unsync - Remove synchronized addresses from the destination device
460 * @to: destination device
461 * @from: source device
462 *
463 * Remove all addresses that were added to the destination device by
464 * dev_uc_sync(). This function is intended to be called from the
465 * dev->stop function of layered software devices.
466 */
467void dev_uc_unsync(struct net_device *to, struct net_device *from)
468{
469 if (to->addr_len != from->addr_len)
470 return;
471
472 netif_addr_lock_bh(from);
473 netif_addr_lock(to);
474 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
475 __dev_set_rx_mode(to);
476 netif_addr_unlock(to);
477 netif_addr_unlock_bh(from);
478}
479EXPORT_SYMBOL(dev_uc_unsync);
480
481/**
482 * dev_uc_flush - Flush unicast addresses
483 * @dev: device
484 *
485 * Flush unicast addresses.
486 */
487void dev_uc_flush(struct net_device *dev)
488{
489 netif_addr_lock_bh(dev);
490 __hw_addr_flush(&dev->uc);
491 netif_addr_unlock_bh(dev);
492}
493EXPORT_SYMBOL(dev_uc_flush);
494
495/**
496 * dev_uc_flush - Init unicast address list
497 * @dev: device
498 *
499 * Init unicast address list.
500 */
501void dev_uc_init(struct net_device *dev)
502{
503 __hw_addr_init(&dev->uc);
504}
505EXPORT_SYMBOL(dev_uc_init);
506
507/*
508 * Multicast list handling functions
509 */
510
511static int __dev_mc_add(struct net_device *dev, unsigned char *addr,
512 bool global)
513{
514 int err;
515
516 netif_addr_lock_bh(dev);
517 err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
518 NETDEV_HW_ADDR_T_MULTICAST, global);
519 if (!err)
520 __dev_set_rx_mode(dev);
521 netif_addr_unlock_bh(dev);
522 return err;
523}
524/**
525 * dev_mc_add - Add a multicast address
526 * @dev: device
527 * @addr: address to add
528 *
529 * Add a multicast address to the device or increase
530 * the reference count if it already exists.
531 */
532int dev_mc_add(struct net_device *dev, unsigned char *addr)
533{
534 return __dev_mc_add(dev, addr, false);
535}
536EXPORT_SYMBOL(dev_mc_add);
537
538/**
539 * dev_mc_add_global - Add a global multicast address
540 * @dev: device
541 * @addr: address to add
542 *
543 * Add a global multicast address to the device.
544 */
545int dev_mc_add_global(struct net_device *dev, unsigned char *addr)
546{
547 return __dev_mc_add(dev, addr, true);
548}
549EXPORT_SYMBOL(dev_mc_add_global);
550
551static int __dev_mc_del(struct net_device *dev, unsigned char *addr,
552 bool global)
553{
554 int err;
555
556 netif_addr_lock_bh(dev);
557 err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len,
558 NETDEV_HW_ADDR_T_MULTICAST, global);
559 if (!err)
560 __dev_set_rx_mode(dev);
561 netif_addr_unlock_bh(dev);
562 return err;
563}
564
565/**
566 * dev_mc_del - Delete a multicast address.
567 * @dev: device
568 * @addr: address to delete
569 *
570 * Release reference to a multicast address and remove it
571 * from the device if the reference count drops to zero.
572 */
573int dev_mc_del(struct net_device *dev, unsigned char *addr)
574{
575 return __dev_mc_del(dev, addr, false);
576}
577EXPORT_SYMBOL(dev_mc_del);
578
579/**
580 * dev_mc_del_global - Delete a global multicast address.
581 * @dev: device
582 * @addr: address to delete
583 *
584 * Release reference to a multicast address and remove it
585 * from the device if the reference count drops to zero.
586 */
587int dev_mc_del_global(struct net_device *dev, unsigned char *addr)
588{
589 return __dev_mc_del(dev, addr, true);
590}
591EXPORT_SYMBOL(dev_mc_del_global);
592
593/**
594 * dev_mc_sync - Synchronize device's unicast list to another device
595 * @to: destination device
596 * @from: source device
597 *
598 * Add newly added addresses to the destination device and release
599 * addresses that have no users left. The source device must be
600 * locked by netif_tx_lock_bh.
601 *
602 * This function is intended to be called from the dev->set_multicast_list
603 * or dev->set_rx_mode function of layered software devices.
604 */
605int dev_mc_sync(struct net_device *to, struct net_device *from)
606{
607 int err = 0;
608
609 if (to->addr_len != from->addr_len)
610 return -EINVAL;
611
612 netif_addr_lock_bh(to);
613 err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
614 if (!err)
615 __dev_set_rx_mode(to);
616 netif_addr_unlock_bh(to);
617 return err;
618}
619EXPORT_SYMBOL(dev_mc_sync);
620
621/**
622 * dev_mc_unsync - Remove synchronized addresses from the destination device
623 * @to: destination device
624 * @from: source device
625 *
626 * Remove all addresses that were added to the destination device by
627 * dev_mc_sync(). This function is intended to be called from the
628 * dev->stop function of layered software devices.
629 */
630void dev_mc_unsync(struct net_device *to, struct net_device *from)
631{
632 if (to->addr_len != from->addr_len)
633 return;
634
635 netif_addr_lock_bh(from);
636 netif_addr_lock(to);
637 __hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
638 __dev_set_rx_mode(to);
639 netif_addr_unlock(to);
640 netif_addr_unlock_bh(from);
641}
642EXPORT_SYMBOL(dev_mc_unsync);
643
644/**
645 * dev_mc_flush - Flush multicast addresses
646 * @dev: device
647 *
648 * Flush multicast addresses.
649 */
650void dev_mc_flush(struct net_device *dev)
651{
652 netif_addr_lock_bh(dev);
653 __hw_addr_flush(&dev->mc);
654 netif_addr_unlock_bh(dev);
655}
656EXPORT_SYMBOL(dev_mc_flush);
657
658/**
659 * dev_mc_flush - Init multicast address list
660 * @dev: device
661 *
662 * Init multicast address list.
663 */
664void dev_mc_init(struct net_device *dev)
665{
666 __hw_addr_init(&dev->mc);
667}
668EXPORT_SYMBOL(dev_mc_init);
669
670#ifdef CONFIG_PROC_FS
671#include <linux/seq_file.h>
672
673static int dev_mc_seq_show(struct seq_file *seq, void *v)
674{
675 struct netdev_hw_addr *ha;
676 struct net_device *dev = v;
677
678 if (v == SEQ_START_TOKEN)
679 return 0;
680
681 netif_addr_lock_bh(dev);
682 netdev_for_each_mc_addr(ha, dev) {
683 int i;
684
685 seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex,
686 dev->name, ha->refcount, ha->global_use);
687
688 for (i = 0; i < dev->addr_len; i++)
689 seq_printf(seq, "%02x", ha->addr[i]);
690
691 seq_putc(seq, '\n');
692 }
693 netif_addr_unlock_bh(dev);
694 return 0;
695}
696
697static const struct seq_operations dev_mc_seq_ops = {
698 .start = dev_seq_start,
699 .next = dev_seq_next,
700 .stop = dev_seq_stop,
701 .show = dev_mc_seq_show,
702};
703
704static int dev_mc_seq_open(struct inode *inode, struct file *file)
705{
706 return seq_open_net(inode, file, &dev_mc_seq_ops,
707 sizeof(struct seq_net_private));
708}
709
710static const struct file_operations dev_mc_seq_fops = {
711 .owner = THIS_MODULE,
712 .open = dev_mc_seq_open,
713 .read = seq_read,
714 .llseek = seq_lseek,
715 .release = seq_release_net,
716};
717
718#endif
719
720static int __net_init dev_mc_net_init(struct net *net)
721{
722 if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops))
723 return -ENOMEM;
724 return 0;
725}
726
727static void __net_exit dev_mc_net_exit(struct net *net)
728{
729 proc_net_remove(net, "dev_mcast");
730}
731
732static struct pernet_operations __net_initdata dev_mc_net_ops = {
733 .init = dev_mc_net_init,
734 .exit = dev_mc_net_exit,
735};
736
737void __init dev_mcast_init(void)
738{
739 register_pernet_subsys(&dev_mc_net_ops);
740}
741
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
deleted file mode 100644
index 3dc295beb483..000000000000
--- a/net/core/dev_mcast.c
+++ /dev/null
@@ -1,232 +0,0 @@
1/*
2 * Linux NET3: Multicast List maintenance.
3 *
4 * Authors:
5 * Tim Kordas <tjk@nostromo.eeap.cwru.edu>
6 * Richard Underwood <richard@wuzz.demon.co.uk>
7 *
8 * Stir fried together from the IP multicast and CAP patches above
9 * Alan Cox <alan@lxorguk.ukuu.org.uk>
10 *
11 * Fixes:
12 * Alan Cox : Update the device on a real delete
13 * rather than any time but...
14 * Alan Cox : IFF_ALLMULTI support.
15 * Alan Cox : New format set_multicast_list() calls.
16 * Gleb Natapov : Remove dev_mc_lock.
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 */
23
24#include <linux/module.h>
25#include <asm/uaccess.h>
26#include <asm/system.h>
27#include <linux/bitops.h>
28#include <linux/types.h>
29#include <linux/kernel.h>
30#include <linux/string.h>
31#include <linux/mm.h>
32#include <linux/socket.h>
33#include <linux/sockios.h>
34#include <linux/in.h>
35#include <linux/errno.h>
36#include <linux/interrupt.h>
37#include <linux/if_ether.h>
38#include <linux/inet.h>
39#include <linux/netdevice.h>
40#include <linux/etherdevice.h>
41#include <linux/proc_fs.h>
42#include <linux/seq_file.h>
43#include <linux/init.h>
44#include <net/net_namespace.h>
45#include <net/ip.h>
46#include <net/route.h>
47#include <linux/skbuff.h>
48#include <net/sock.h>
49#include <net/arp.h>
50
51
52/*
53 * Device multicast list maintenance.
54 *
55 * This is used both by IP and by the user level maintenance functions.
56 * Unlike BSD we maintain a usage count on a given multicast address so
57 * that a casual user application can add/delete multicasts used by
58 * protocols without doing damage to the protocols when it deletes the
59 * entries. It also helps IP as it tracks overlapping maps.
60 *
61 * Device mc lists are changed by bh at least if IPv6 is enabled,
62 * so that it must be bh protected.
63 *
64 * We block accesses to device mc filters with netif_tx_lock.
65 */
66
67/*
68 * Delete a device level multicast
69 */
70
71int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
72{
73 int err;
74
75 netif_addr_lock_bh(dev);
76 err = __dev_addr_delete(&dev->mc_list, &dev->mc_count,
77 addr, alen, glbl);
78 if (!err) {
79 /*
80 * We have altered the list, so the card
81 * loaded filter is now wrong. Fix it
82 */
83
84 __dev_set_rx_mode(dev);
85 }
86 netif_addr_unlock_bh(dev);
87 return err;
88}
89
90/*
91 * Add a device level multicast
92 */
93
94int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
95{
96 int err;
97
98 netif_addr_lock_bh(dev);
99 if (alen != dev->addr_len)
100 err = -EINVAL;
101 else
102 err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
103 if (!err)
104 __dev_set_rx_mode(dev);
105 netif_addr_unlock_bh(dev);
106 return err;
107}
108
109/**
110 * dev_mc_sync - Synchronize device's multicast list to another device
111 * @to: destination device
112 * @from: source device
113 *
114 * Add newly added addresses to the destination device and release
115 * addresses that have no users left. The source device must be
116 * locked by netif_tx_lock_bh.
117 *
118 * This function is intended to be called from the dev->set_multicast_list
119 * or dev->set_rx_mode function of layered software devices.
120 */
121int dev_mc_sync(struct net_device *to, struct net_device *from)
122{
123 int err = 0;
124
125 netif_addr_lock_bh(to);
126 err = __dev_addr_sync(&to->mc_list, &to->mc_count,
127 &from->mc_list, &from->mc_count);
128 if (!err)
129 __dev_set_rx_mode(to);
130 netif_addr_unlock_bh(to);
131
132 return err;
133}
134EXPORT_SYMBOL(dev_mc_sync);
135
136
137/**
138 * dev_mc_unsync - Remove synchronized addresses from the destination
139 * device
140 * @to: destination device
141 * @from: source device
142 *
143 * Remove all addresses that were added to the destination device by
144 * dev_mc_sync(). This function is intended to be called from the
145 * dev->stop function of layered software devices.
146 */
147void dev_mc_unsync(struct net_device *to, struct net_device *from)
148{
149 netif_addr_lock_bh(from);
150 netif_addr_lock(to);
151
152 __dev_addr_unsync(&to->mc_list, &to->mc_count,
153 &from->mc_list, &from->mc_count);
154 __dev_set_rx_mode(to);
155
156 netif_addr_unlock(to);
157 netif_addr_unlock_bh(from);
158}
159EXPORT_SYMBOL(dev_mc_unsync);
160
161#ifdef CONFIG_PROC_FS
162static int dev_mc_seq_show(struct seq_file *seq, void *v)
163{
164 struct dev_addr_list *m;
165 struct net_device *dev = v;
166
167 if (v == SEQ_START_TOKEN)
168 return 0;
169
170 netif_addr_lock_bh(dev);
171 for (m = dev->mc_list; m; m = m->next) {
172 int i;
173
174 seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex,
175 dev->name, m->dmi_users, m->dmi_gusers);
176
177 for (i = 0; i < m->dmi_addrlen; i++)
178 seq_printf(seq, "%02x", m->dmi_addr[i]);
179
180 seq_putc(seq, '\n');
181 }
182 netif_addr_unlock_bh(dev);
183 return 0;
184}
185
186static const struct seq_operations dev_mc_seq_ops = {
187 .start = dev_seq_start,
188 .next = dev_seq_next,
189 .stop = dev_seq_stop,
190 .show = dev_mc_seq_show,
191};
192
193static int dev_mc_seq_open(struct inode *inode, struct file *file)
194{
195 return seq_open_net(inode, file, &dev_mc_seq_ops,
196 sizeof(struct seq_net_private));
197}
198
199static const struct file_operations dev_mc_seq_fops = {
200 .owner = THIS_MODULE,
201 .open = dev_mc_seq_open,
202 .read = seq_read,
203 .llseek = seq_lseek,
204 .release = seq_release_net,
205};
206
207#endif
208
209static int __net_init dev_mc_net_init(struct net *net)
210{
211 if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops))
212 return -ENOMEM;
213 return 0;
214}
215
216static void __net_exit dev_mc_net_exit(struct net *net)
217{
218 proc_net_remove(net, "dev_mcast");
219}
220
221static struct pernet_operations __net_initdata dev_mc_net_ops = {
222 .init = dev_mc_net_init,
223 .exit = dev_mc_net_exit,
224};
225
226void __init dev_mcast_init(void)
227{
228 register_pernet_subsys(&dev_mc_net_ops);
229}
230
231EXPORT_SYMBOL(dev_mc_add);
232EXPORT_SYMBOL(dev_mc_delete);
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index f8c874975350..cf208d8042b1 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -21,6 +21,7 @@
21#include <linux/percpu.h> 21#include <linux/percpu.h>
22#include <linux/timer.h> 22#include <linux/timer.h>
23#include <linux/bitops.h> 23#include <linux/bitops.h>
24#include <linux/slab.h>
24#include <net/genetlink.h> 25#include <net/genetlink.h>
25#include <net/netevent.h> 26#include <net/netevent.h>
26 27
diff --git a/net/core/dst.c b/net/core/dst.c
index cb1b3488b739..9920722cc82b 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -12,6 +12,7 @@
12#include <linux/workqueue.h> 12#include <linux/workqueue.h>
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/slab.h>
15#include <linux/netdevice.h> 16#include <linux/netdevice.h>
16#include <linux/skbuff.h> 17#include <linux/skbuff.h>
17#include <linux/string.h> 18#include <linux/string.h>
@@ -43,7 +44,7 @@ static atomic_t dst_total = ATOMIC_INIT(0);
43 */ 44 */
44static struct { 45static struct {
45 spinlock_t lock; 46 spinlock_t lock;
46 struct dst_entry *list; 47 struct dst_entry *list;
47 unsigned long timer_inc; 48 unsigned long timer_inc;
48 unsigned long timer_expires; 49 unsigned long timer_expires;
49} dst_garbage = { 50} dst_garbage = {
@@ -51,7 +52,7 @@ static struct {
51 .timer_inc = DST_GC_MAX, 52 .timer_inc = DST_GC_MAX,
52}; 53};
53static void dst_gc_task(struct work_struct *work); 54static void dst_gc_task(struct work_struct *work);
54static void ___dst_free(struct dst_entry * dst); 55static void ___dst_free(struct dst_entry *dst);
55 56
56static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task); 57static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);
57 58
@@ -135,8 +136,8 @@ loop:
135 } 136 }
136 expires = dst_garbage.timer_expires; 137 expires = dst_garbage.timer_expires;
137 /* 138 /*
138 * if the next desired timer is more than 4 seconds in the future 139 * if the next desired timer is more than 4 seconds in the
139 * then round the timer to whole seconds 140 * future then round the timer to whole seconds
140 */ 141 */
141 if (expires > 4*HZ) 142 if (expires > 4*HZ)
142 expires = round_jiffies_relative(expires); 143 expires = round_jiffies_relative(expires);
@@ -151,7 +152,8 @@ loop:
151 " expires: %lu elapsed: %lu us\n", 152 " expires: %lu elapsed: %lu us\n",
152 atomic_read(&dst_total), delayed, work_performed, 153 atomic_read(&dst_total), delayed, work_performed,
153 expires, 154 expires,
154 elapsed.tv_sec * USEC_PER_SEC + elapsed.tv_nsec / NSEC_PER_USEC); 155 elapsed.tv_sec * USEC_PER_SEC +
156 elapsed.tv_nsec / NSEC_PER_USEC);
155#endif 157#endif
156} 158}
157 159
@@ -162,9 +164,9 @@ int dst_discard(struct sk_buff *skb)
162} 164}
163EXPORT_SYMBOL(dst_discard); 165EXPORT_SYMBOL(dst_discard);
164 166
165void * dst_alloc(struct dst_ops * ops) 167void *dst_alloc(struct dst_ops *ops)
166{ 168{
167 struct dst_entry * dst; 169 struct dst_entry *dst;
168 170
169 if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) { 171 if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
170 if (ops->gc(ops)) 172 if (ops->gc(ops))
@@ -184,19 +186,20 @@ void * dst_alloc(struct dst_ops * ops)
184 atomic_inc(&ops->entries); 186 atomic_inc(&ops->entries);
185 return dst; 187 return dst;
186} 188}
189EXPORT_SYMBOL(dst_alloc);
187 190
188static void ___dst_free(struct dst_entry * dst) 191static void ___dst_free(struct dst_entry *dst)
189{ 192{
190 /* The first case (dev==NULL) is required, when 193 /* The first case (dev==NULL) is required, when
191 protocol module is unloaded. 194 protocol module is unloaded.
192 */ 195 */
193 if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) { 196 if (dst->dev == NULL || !(dst->dev->flags&IFF_UP))
194 dst->input = dst->output = dst_discard; 197 dst->input = dst->output = dst_discard;
195 }
196 dst->obsolete = 2; 198 dst->obsolete = 2;
197} 199}
200EXPORT_SYMBOL(__dst_free);
198 201
199void __dst_free(struct dst_entry * dst) 202void __dst_free(struct dst_entry *dst)
200{ 203{
201 spin_lock_bh(&dst_garbage.lock); 204 spin_lock_bh(&dst_garbage.lock);
202 ___dst_free(dst); 205 ___dst_free(dst);
@@ -261,15 +264,16 @@ again:
261 } 264 }
262 return NULL; 265 return NULL;
263} 266}
267EXPORT_SYMBOL(dst_destroy);
264 268
265void dst_release(struct dst_entry *dst) 269void dst_release(struct dst_entry *dst)
266{ 270{
267 if (dst) { 271 if (dst) {
268 int newrefcnt; 272 int newrefcnt;
269 273
270 smp_mb__before_atomic_dec(); 274 smp_mb__before_atomic_dec();
271 newrefcnt = atomic_dec_return(&dst->__refcnt); 275 newrefcnt = atomic_dec_return(&dst->__refcnt);
272 WARN_ON(newrefcnt < 0); 276 WARN_ON(newrefcnt < 0);
273 } 277 }
274} 278}
275EXPORT_SYMBOL(dst_release); 279EXPORT_SYMBOL(dst_release);
@@ -282,8 +286,8 @@ EXPORT_SYMBOL(dst_release);
282 * 286 *
283 * Commented and originally written by Alexey. 287 * Commented and originally written by Alexey.
284 */ 288 */
285static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev, 289static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
286 int unregister) 290 int unregister)
287{ 291{
288 if (dst->ops->ifdown) 292 if (dst->ops->ifdown)
289 dst->ops->ifdown(dst, dev, unregister); 293 dst->ops->ifdown(dst, dev, unregister);
@@ -305,7 +309,8 @@ static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
305 } 309 }
306} 310}
307 311
308static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr) 312static int dst_dev_event(struct notifier_block *this, unsigned long event,
313 void *ptr)
309{ 314{
310 struct net_device *dev = ptr; 315 struct net_device *dev = ptr;
311 struct dst_entry *dst, *last = NULL; 316 struct dst_entry *dst, *last = NULL;
@@ -328,9 +333,8 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, void
328 last->next = dst; 333 last->next = dst;
329 else 334 else
330 dst_busy_list = dst; 335 dst_busy_list = dst;
331 for (; dst; dst = dst->next) { 336 for (; dst; dst = dst->next)
332 dst_ifdown(dst, dev, event != NETDEV_DOWN); 337 dst_ifdown(dst, dev, event != NETDEV_DOWN);
333 }
334 mutex_unlock(&dst_gc_mutex); 338 mutex_unlock(&dst_gc_mutex);
335 break; 339 break;
336 } 340 }
@@ -345,7 +349,3 @@ void __init dst_init(void)
345{ 349{
346 register_netdevice_notifier(&dst_dev_notifier); 350 register_netdevice_notifier(&dst_dev_notifier);
347} 351}
348
349EXPORT_SYMBOL(__dst_free);
350EXPORT_SYMBOL(dst_alloc);
351EXPORT_SYMBOL(dst_destroy);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index f4cb6b6299d9..1a7db92037fa 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -18,7 +18,8 @@
18#include <linux/ethtool.h> 18#include <linux/ethtool.h>
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/bitops.h> 20#include <linux/bitops.h>
21#include <asm/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/slab.h>
22 23
23/* 24/*
24 * Some useful ethtool_ops methods that're device independent. 25 * Some useful ethtool_ops methods that're device independent.
@@ -30,6 +31,7 @@ u32 ethtool_op_get_link(struct net_device *dev)
30{ 31{
31 return netif_carrier_ok(dev) ? 1 : 0; 32 return netif_carrier_ok(dev) ? 1 : 0;
32} 33}
34EXPORT_SYMBOL(ethtool_op_get_link);
33 35
34u32 ethtool_op_get_rx_csum(struct net_device *dev) 36u32 ethtool_op_get_rx_csum(struct net_device *dev)
35{ 37{
@@ -62,6 +64,7 @@ int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
62 64
63 return 0; 65 return 0;
64} 66}
67EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum);
65 68
66int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data) 69int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
67{ 70{
@@ -72,11 +75,13 @@ int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
72 75
73 return 0; 76 return 0;
74} 77}
78EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum);
75 79
76u32 ethtool_op_get_sg(struct net_device *dev) 80u32 ethtool_op_get_sg(struct net_device *dev)
77{ 81{
78 return (dev->features & NETIF_F_SG) != 0; 82 return (dev->features & NETIF_F_SG) != 0;
79} 83}
84EXPORT_SYMBOL(ethtool_op_get_sg);
80 85
81int ethtool_op_set_sg(struct net_device *dev, u32 data) 86int ethtool_op_set_sg(struct net_device *dev, u32 data)
82{ 87{
@@ -87,11 +92,13 @@ int ethtool_op_set_sg(struct net_device *dev, u32 data)
87 92
88 return 0; 93 return 0;
89} 94}
95EXPORT_SYMBOL(ethtool_op_set_sg);
90 96
91u32 ethtool_op_get_tso(struct net_device *dev) 97u32 ethtool_op_get_tso(struct net_device *dev)
92{ 98{
93 return (dev->features & NETIF_F_TSO) != 0; 99 return (dev->features & NETIF_F_TSO) != 0;
94} 100}
101EXPORT_SYMBOL(ethtool_op_get_tso);
95 102
96int ethtool_op_set_tso(struct net_device *dev, u32 data) 103int ethtool_op_set_tso(struct net_device *dev, u32 data)
97{ 104{
@@ -102,11 +109,13 @@ int ethtool_op_set_tso(struct net_device *dev, u32 data)
102 109
103 return 0; 110 return 0;
104} 111}
112EXPORT_SYMBOL(ethtool_op_set_tso);
105 113
106u32 ethtool_op_get_ufo(struct net_device *dev) 114u32 ethtool_op_get_ufo(struct net_device *dev)
107{ 115{
108 return (dev->features & NETIF_F_UFO) != 0; 116 return (dev->features & NETIF_F_UFO) != 0;
109} 117}
118EXPORT_SYMBOL(ethtool_op_get_ufo);
110 119
111int ethtool_op_set_ufo(struct net_device *dev, u32 data) 120int ethtool_op_set_ufo(struct net_device *dev, u32 data)
112{ 121{
@@ -116,12 +125,13 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data)
116 dev->features &= ~NETIF_F_UFO; 125 dev->features &= ~NETIF_F_UFO;
117 return 0; 126 return 0;
118} 127}
128EXPORT_SYMBOL(ethtool_op_set_ufo);
119 129
120/* the following list of flags are the same as their associated 130/* the following list of flags are the same as their associated
121 * NETIF_F_xxx values in include/linux/netdevice.h 131 * NETIF_F_xxx values in include/linux/netdevice.h
122 */ 132 */
123static const u32 flags_dup_features = 133static const u32 flags_dup_features =
124 (ETH_FLAG_LRO | ETH_FLAG_NTUPLE); 134 (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);
125 135
126u32 ethtool_op_get_flags(struct net_device *dev) 136u32 ethtool_op_get_flags(struct net_device *dev)
127{ 137{
@@ -132,6 +142,7 @@ u32 ethtool_op_get_flags(struct net_device *dev)
132 142
133 return dev->features & flags_dup_features; 143 return dev->features & flags_dup_features;
134} 144}
145EXPORT_SYMBOL(ethtool_op_get_flags);
135 146
136int ethtool_op_set_flags(struct net_device *dev, u32 data) 147int ethtool_op_set_flags(struct net_device *dev, u32 data)
137{ 148{
@@ -152,9 +163,15 @@ int ethtool_op_set_flags(struct net_device *dev, u32 data)
152 features &= ~NETIF_F_NTUPLE; 163 features &= ~NETIF_F_NTUPLE;
153 } 164 }
154 165
166 if (data & ETH_FLAG_RXHASH)
167 features |= NETIF_F_RXHASH;
168 else
169 features &= ~NETIF_F_RXHASH;
170
155 dev->features = features; 171 dev->features = features;
156 return 0; 172 return 0;
157} 173}
174EXPORT_SYMBOL(ethtool_op_set_flags);
158 175
159void ethtool_ntuple_flush(struct net_device *dev) 176void ethtool_ntuple_flush(struct net_device *dev)
160{ 177{
@@ -200,7 +217,8 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
200 return dev->ethtool_ops->set_settings(dev, &cmd); 217 return dev->ethtool_ops->set_settings(dev, &cmd);
201} 218}
202 219
203static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr) 220static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
221 void __user *useraddr)
204{ 222{
205 struct ethtool_drvinfo info; 223 struct ethtool_drvinfo info;
206 const struct ethtool_ops *ops = dev->ethtool_ops; 224 const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -240,7 +258,7 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void _
240} 258}
241 259
242static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev, 260static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
243 void __user *useraddr) 261 void __user *useraddr)
244{ 262{
245 struct ethtool_sset_info info; 263 struct ethtool_sset_info info;
246 const struct ethtool_ops *ops = dev->ethtool_ops; 264 const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -299,7 +317,8 @@ out:
299 return ret; 317 return ret;
300} 318}
301 319
302static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr) 320static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
321 void __user *useraddr)
303{ 322{
304 struct ethtool_rxnfc cmd; 323 struct ethtool_rxnfc cmd;
305 324
@@ -312,7 +331,8 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __u
312 return dev->ethtool_ops->set_rxnfc(dev, &cmd); 331 return dev->ethtool_ops->set_rxnfc(dev, &cmd);
313} 332}
314 333
315static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr) 334static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
335 void __user *useraddr)
316{ 336{
317 struct ethtool_rxnfc info; 337 struct ethtool_rxnfc info;
318 const struct ethtool_ops *ops = dev->ethtool_ops; 338 const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -357,8 +377,8 @@ err_out:
357} 377}
358 378
359static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list, 379static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
360 struct ethtool_rx_ntuple_flow_spec *spec, 380 struct ethtool_rx_ntuple_flow_spec *spec,
361 struct ethtool_rx_ntuple_flow_spec_container *fsc) 381 struct ethtool_rx_ntuple_flow_spec_container *fsc)
362{ 382{
363 383
364 /* don't add filters forever */ 384 /* don't add filters forever */
@@ -384,7 +404,8 @@ static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
384 list->count++; 404 list->count++;
385} 405}
386 406
387static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr) 407static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
408 void __user *useraddr)
388{ 409{
389 struct ethtool_rx_ntuple cmd; 410 struct ethtool_rx_ntuple cmd;
390 const struct ethtool_ops *ops = dev->ethtool_ops; 411 const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -509,125 +530,125 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
509 case UDP_V4_FLOW: 530 case UDP_V4_FLOW:
510 case SCTP_V4_FLOW: 531 case SCTP_V4_FLOW:
511 sprintf(p, "\tSrc IP addr: 0x%x\n", 532 sprintf(p, "\tSrc IP addr: 0x%x\n",
512 fsc->fs.h_u.tcp_ip4_spec.ip4src); 533 fsc->fs.h_u.tcp_ip4_spec.ip4src);
513 p += ETH_GSTRING_LEN; 534 p += ETH_GSTRING_LEN;
514 num_strings++; 535 num_strings++;
515 sprintf(p, "\tSrc IP mask: 0x%x\n", 536 sprintf(p, "\tSrc IP mask: 0x%x\n",
516 fsc->fs.m_u.tcp_ip4_spec.ip4src); 537 fsc->fs.m_u.tcp_ip4_spec.ip4src);
517 p += ETH_GSTRING_LEN; 538 p += ETH_GSTRING_LEN;
518 num_strings++; 539 num_strings++;
519 sprintf(p, "\tDest IP addr: 0x%x\n", 540 sprintf(p, "\tDest IP addr: 0x%x\n",
520 fsc->fs.h_u.tcp_ip4_spec.ip4dst); 541 fsc->fs.h_u.tcp_ip4_spec.ip4dst);
521 p += ETH_GSTRING_LEN; 542 p += ETH_GSTRING_LEN;
522 num_strings++; 543 num_strings++;
523 sprintf(p, "\tDest IP mask: 0x%x\n", 544 sprintf(p, "\tDest IP mask: 0x%x\n",
524 fsc->fs.m_u.tcp_ip4_spec.ip4dst); 545 fsc->fs.m_u.tcp_ip4_spec.ip4dst);
525 p += ETH_GSTRING_LEN; 546 p += ETH_GSTRING_LEN;
526 num_strings++; 547 num_strings++;
527 sprintf(p, "\tSrc Port: %d, mask: 0x%x\n", 548 sprintf(p, "\tSrc Port: %d, mask: 0x%x\n",
528 fsc->fs.h_u.tcp_ip4_spec.psrc, 549 fsc->fs.h_u.tcp_ip4_spec.psrc,
529 fsc->fs.m_u.tcp_ip4_spec.psrc); 550 fsc->fs.m_u.tcp_ip4_spec.psrc);
530 p += ETH_GSTRING_LEN; 551 p += ETH_GSTRING_LEN;
531 num_strings++; 552 num_strings++;
532 sprintf(p, "\tDest Port: %d, mask: 0x%x\n", 553 sprintf(p, "\tDest Port: %d, mask: 0x%x\n",
533 fsc->fs.h_u.tcp_ip4_spec.pdst, 554 fsc->fs.h_u.tcp_ip4_spec.pdst,
534 fsc->fs.m_u.tcp_ip4_spec.pdst); 555 fsc->fs.m_u.tcp_ip4_spec.pdst);
535 p += ETH_GSTRING_LEN; 556 p += ETH_GSTRING_LEN;
536 num_strings++; 557 num_strings++;
537 sprintf(p, "\tTOS: %d, mask: 0x%x\n", 558 sprintf(p, "\tTOS: %d, mask: 0x%x\n",
538 fsc->fs.h_u.tcp_ip4_spec.tos, 559 fsc->fs.h_u.tcp_ip4_spec.tos,
539 fsc->fs.m_u.tcp_ip4_spec.tos); 560 fsc->fs.m_u.tcp_ip4_spec.tos);
540 p += ETH_GSTRING_LEN; 561 p += ETH_GSTRING_LEN;
541 num_strings++; 562 num_strings++;
542 break; 563 break;
543 case AH_ESP_V4_FLOW: 564 case AH_ESP_V4_FLOW:
544 case ESP_V4_FLOW: 565 case ESP_V4_FLOW:
545 sprintf(p, "\tSrc IP addr: 0x%x\n", 566 sprintf(p, "\tSrc IP addr: 0x%x\n",
546 fsc->fs.h_u.ah_ip4_spec.ip4src); 567 fsc->fs.h_u.ah_ip4_spec.ip4src);
547 p += ETH_GSTRING_LEN; 568 p += ETH_GSTRING_LEN;
548 num_strings++; 569 num_strings++;
549 sprintf(p, "\tSrc IP mask: 0x%x\n", 570 sprintf(p, "\tSrc IP mask: 0x%x\n",
550 fsc->fs.m_u.ah_ip4_spec.ip4src); 571 fsc->fs.m_u.ah_ip4_spec.ip4src);
551 p += ETH_GSTRING_LEN; 572 p += ETH_GSTRING_LEN;
552 num_strings++; 573 num_strings++;
553 sprintf(p, "\tDest IP addr: 0x%x\n", 574 sprintf(p, "\tDest IP addr: 0x%x\n",
554 fsc->fs.h_u.ah_ip4_spec.ip4dst); 575 fsc->fs.h_u.ah_ip4_spec.ip4dst);
555 p += ETH_GSTRING_LEN; 576 p += ETH_GSTRING_LEN;
556 num_strings++; 577 num_strings++;
557 sprintf(p, "\tDest IP mask: 0x%x\n", 578 sprintf(p, "\tDest IP mask: 0x%x\n",
558 fsc->fs.m_u.ah_ip4_spec.ip4dst); 579 fsc->fs.m_u.ah_ip4_spec.ip4dst);
559 p += ETH_GSTRING_LEN; 580 p += ETH_GSTRING_LEN;
560 num_strings++; 581 num_strings++;
561 sprintf(p, "\tSPI: %d, mask: 0x%x\n", 582 sprintf(p, "\tSPI: %d, mask: 0x%x\n",
562 fsc->fs.h_u.ah_ip4_spec.spi, 583 fsc->fs.h_u.ah_ip4_spec.spi,
563 fsc->fs.m_u.ah_ip4_spec.spi); 584 fsc->fs.m_u.ah_ip4_spec.spi);
564 p += ETH_GSTRING_LEN; 585 p += ETH_GSTRING_LEN;
565 num_strings++; 586 num_strings++;
566 sprintf(p, "\tTOS: %d, mask: 0x%x\n", 587 sprintf(p, "\tTOS: %d, mask: 0x%x\n",
567 fsc->fs.h_u.ah_ip4_spec.tos, 588 fsc->fs.h_u.ah_ip4_spec.tos,
568 fsc->fs.m_u.ah_ip4_spec.tos); 589 fsc->fs.m_u.ah_ip4_spec.tos);
569 p += ETH_GSTRING_LEN; 590 p += ETH_GSTRING_LEN;
570 num_strings++; 591 num_strings++;
571 break; 592 break;
572 case IP_USER_FLOW: 593 case IP_USER_FLOW:
573 sprintf(p, "\tSrc IP addr: 0x%x\n", 594 sprintf(p, "\tSrc IP addr: 0x%x\n",
574 fsc->fs.h_u.raw_ip4_spec.ip4src); 595 fsc->fs.h_u.raw_ip4_spec.ip4src);
575 p += ETH_GSTRING_LEN; 596 p += ETH_GSTRING_LEN;
576 num_strings++; 597 num_strings++;
577 sprintf(p, "\tSrc IP mask: 0x%x\n", 598 sprintf(p, "\tSrc IP mask: 0x%x\n",
578 fsc->fs.m_u.raw_ip4_spec.ip4src); 599 fsc->fs.m_u.raw_ip4_spec.ip4src);
579 p += ETH_GSTRING_LEN; 600 p += ETH_GSTRING_LEN;
580 num_strings++; 601 num_strings++;
581 sprintf(p, "\tDest IP addr: 0x%x\n", 602 sprintf(p, "\tDest IP addr: 0x%x\n",
582 fsc->fs.h_u.raw_ip4_spec.ip4dst); 603 fsc->fs.h_u.raw_ip4_spec.ip4dst);
583 p += ETH_GSTRING_LEN; 604 p += ETH_GSTRING_LEN;
584 num_strings++; 605 num_strings++;
585 sprintf(p, "\tDest IP mask: 0x%x\n", 606 sprintf(p, "\tDest IP mask: 0x%x\n",
586 fsc->fs.m_u.raw_ip4_spec.ip4dst); 607 fsc->fs.m_u.raw_ip4_spec.ip4dst);
587 p += ETH_GSTRING_LEN; 608 p += ETH_GSTRING_LEN;
588 num_strings++; 609 num_strings++;
589 break; 610 break;
590 case IPV4_FLOW: 611 case IPV4_FLOW:
591 sprintf(p, "\tSrc IP addr: 0x%x\n", 612 sprintf(p, "\tSrc IP addr: 0x%x\n",
592 fsc->fs.h_u.usr_ip4_spec.ip4src); 613 fsc->fs.h_u.usr_ip4_spec.ip4src);
593 p += ETH_GSTRING_LEN; 614 p += ETH_GSTRING_LEN;
594 num_strings++; 615 num_strings++;
595 sprintf(p, "\tSrc IP mask: 0x%x\n", 616 sprintf(p, "\tSrc IP mask: 0x%x\n",
596 fsc->fs.m_u.usr_ip4_spec.ip4src); 617 fsc->fs.m_u.usr_ip4_spec.ip4src);
597 p += ETH_GSTRING_LEN; 618 p += ETH_GSTRING_LEN;
598 num_strings++; 619 num_strings++;
599 sprintf(p, "\tDest IP addr: 0x%x\n", 620 sprintf(p, "\tDest IP addr: 0x%x\n",
600 fsc->fs.h_u.usr_ip4_spec.ip4dst); 621 fsc->fs.h_u.usr_ip4_spec.ip4dst);
601 p += ETH_GSTRING_LEN; 622 p += ETH_GSTRING_LEN;
602 num_strings++; 623 num_strings++;
603 sprintf(p, "\tDest IP mask: 0x%x\n", 624 sprintf(p, "\tDest IP mask: 0x%x\n",
604 fsc->fs.m_u.usr_ip4_spec.ip4dst); 625 fsc->fs.m_u.usr_ip4_spec.ip4dst);
605 p += ETH_GSTRING_LEN; 626 p += ETH_GSTRING_LEN;
606 num_strings++; 627 num_strings++;
607 sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n", 628 sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n",
608 fsc->fs.h_u.usr_ip4_spec.l4_4_bytes, 629 fsc->fs.h_u.usr_ip4_spec.l4_4_bytes,
609 fsc->fs.m_u.usr_ip4_spec.l4_4_bytes); 630 fsc->fs.m_u.usr_ip4_spec.l4_4_bytes);
610 p += ETH_GSTRING_LEN; 631 p += ETH_GSTRING_LEN;
611 num_strings++; 632 num_strings++;
612 sprintf(p, "\tTOS: %d, mask: 0x%x\n", 633 sprintf(p, "\tTOS: %d, mask: 0x%x\n",
613 fsc->fs.h_u.usr_ip4_spec.tos, 634 fsc->fs.h_u.usr_ip4_spec.tos,
614 fsc->fs.m_u.usr_ip4_spec.tos); 635 fsc->fs.m_u.usr_ip4_spec.tos);
615 p += ETH_GSTRING_LEN; 636 p += ETH_GSTRING_LEN;
616 num_strings++; 637 num_strings++;
617 sprintf(p, "\tIP Version: %d, mask: 0x%x\n", 638 sprintf(p, "\tIP Version: %d, mask: 0x%x\n",
618 fsc->fs.h_u.usr_ip4_spec.ip_ver, 639 fsc->fs.h_u.usr_ip4_spec.ip_ver,
619 fsc->fs.m_u.usr_ip4_spec.ip_ver); 640 fsc->fs.m_u.usr_ip4_spec.ip_ver);
620 p += ETH_GSTRING_LEN; 641 p += ETH_GSTRING_LEN;
621 num_strings++; 642 num_strings++;
622 sprintf(p, "\tProtocol: %d, mask: 0x%x\n", 643 sprintf(p, "\tProtocol: %d, mask: 0x%x\n",
623 fsc->fs.h_u.usr_ip4_spec.proto, 644 fsc->fs.h_u.usr_ip4_spec.proto,
624 fsc->fs.m_u.usr_ip4_spec.proto); 645 fsc->fs.m_u.usr_ip4_spec.proto);
625 p += ETH_GSTRING_LEN; 646 p += ETH_GSTRING_LEN;
626 num_strings++; 647 num_strings++;
627 break; 648 break;
628 }; 649 };
629 sprintf(p, "\tVLAN: %d, mask: 0x%x\n", 650 sprintf(p, "\tVLAN: %d, mask: 0x%x\n",
630 fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask); 651 fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask);
631 p += ETH_GSTRING_LEN; 652 p += ETH_GSTRING_LEN;
632 num_strings++; 653 num_strings++;
633 sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data); 654 sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data);
@@ -640,7 +661,7 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
640 sprintf(p, "\tAction: Drop\n"); 661 sprintf(p, "\tAction: Drop\n");
641 else 662 else
642 sprintf(p, "\tAction: Direct to queue %d\n", 663 sprintf(p, "\tAction: Direct to queue %d\n",
643 fsc->fs.action); 664 fsc->fs.action);
644 p += ETH_GSTRING_LEN; 665 p += ETH_GSTRING_LEN;
645 num_strings++; 666 num_strings++;
646unknown_filter: 667unknown_filter:
@@ -852,7 +873,8 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
852 return ret; 873 return ret;
853} 874}
854 875
855static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr) 876static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev,
877 void __user *useraddr)
856{ 878{
857 struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; 879 struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
858 880
@@ -866,7 +888,8 @@ static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void
866 return 0; 888 return 0;
867} 889}
868 890
869static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr) 891static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
892 void __user *useraddr)
870{ 893{
871 struct ethtool_coalesce coalesce; 894 struct ethtool_coalesce coalesce;
872 895
@@ -970,6 +993,7 @@ static int ethtool_set_tx_csum(struct net_device *dev, char __user *useraddr)
970 993
971 return dev->ethtool_ops->set_tx_csum(dev, edata.data); 994 return dev->ethtool_ops->set_tx_csum(dev, edata.data);
972} 995}
996EXPORT_SYMBOL(ethtool_op_set_tx_csum);
973 997
974static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr) 998static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr)
975{ 999{
@@ -1041,7 +1065,7 @@ static int ethtool_get_gso(struct net_device *dev, char __user *useraddr)
1041 1065
1042 edata.data = dev->features & NETIF_F_GSO; 1066 edata.data = dev->features & NETIF_F_GSO;
1043 if (copy_to_user(useraddr, &edata, sizeof(edata))) 1067 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1044 return -EFAULT; 1068 return -EFAULT;
1045 return 0; 1069 return 0;
1046} 1070}
1047 1071
@@ -1064,7 +1088,7 @@ static int ethtool_get_gro(struct net_device *dev, char __user *useraddr)
1064 1088
1065 edata.data = dev->features & NETIF_F_GRO; 1089 edata.data = dev->features & NETIF_F_GRO;
1066 if (copy_to_user(useraddr, &edata, sizeof(edata))) 1090 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1067 return -EFAULT; 1091 return -EFAULT;
1068 return 0; 1092 return 0;
1069} 1093}
1070 1094
@@ -1276,7 +1300,8 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
1276 return actor(dev, edata.data); 1300 return actor(dev, edata.data);
1277} 1301}
1278 1302
1279static noinline_for_stack int ethtool_flash_device(struct net_device *dev, char __user *useraddr) 1303static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
1304 char __user *useraddr)
1280{ 1305{
1281 struct ethtool_flash efl; 1306 struct ethtool_flash efl;
1282 1307
@@ -1305,11 +1330,11 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1305 if (!dev->ethtool_ops) 1330 if (!dev->ethtool_ops)
1306 return -EOPNOTSUPP; 1331 return -EOPNOTSUPP;
1307 1332
1308 if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd))) 1333 if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
1309 return -EFAULT; 1334 return -EFAULT;
1310 1335
1311 /* Allow some commands to be done by anyone */ 1336 /* Allow some commands to be done by anyone */
1312 switch(ethcmd) { 1337 switch (ethcmd) {
1313 case ETHTOOL_GDRVINFO: 1338 case ETHTOOL_GDRVINFO:
1314 case ETHTOOL_GMSGLVL: 1339 case ETHTOOL_GMSGLVL:
1315 case ETHTOOL_GCOALESCE: 1340 case ETHTOOL_GCOALESCE:
@@ -1337,10 +1362,11 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1337 return -EPERM; 1362 return -EPERM;
1338 } 1363 }
1339 1364
1340 if (dev->ethtool_ops->begin) 1365 if (dev->ethtool_ops->begin) {
1341 if ((rc = dev->ethtool_ops->begin(dev)) < 0) 1366 rc = dev->ethtool_ops->begin(dev);
1367 if (rc < 0)
1342 return rc; 1368 return rc;
1343 1369 }
1344 old_features = dev->features; 1370 old_features = dev->features;
1345 1371
1346 switch (ethcmd) { 1372 switch (ethcmd) {
@@ -1530,16 +1556,3 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1530 1556
1531 return rc; 1557 return rc;
1532} 1558}
1533
1534EXPORT_SYMBOL(ethtool_op_get_link);
1535EXPORT_SYMBOL(ethtool_op_get_sg);
1536EXPORT_SYMBOL(ethtool_op_get_tso);
1537EXPORT_SYMBOL(ethtool_op_set_sg);
1538EXPORT_SYMBOL(ethtool_op_set_tso);
1539EXPORT_SYMBOL(ethtool_op_set_tx_csum);
1540EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum);
1541EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum);
1542EXPORT_SYMBOL(ethtool_op_set_ufo);
1543EXPORT_SYMBOL(ethtool_op_get_ufo);
1544EXPORT_SYMBOL(ethtool_op_set_flags);
1545EXPORT_SYMBOL(ethtool_op_get_flags);
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 2ff34894357a..1bc66592453c 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/slab.h>
13#include <linux/list.h> 14#include <linux/list.h>
14#include <net/net_namespace.h> 15#include <net/net_namespace.h>
15#include <net/sock.h> 16#include <net/sock.h>
@@ -38,6 +39,24 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
38} 39}
39EXPORT_SYMBOL(fib_default_rule_add); 40EXPORT_SYMBOL(fib_default_rule_add);
40 41
42u32 fib_default_rule_pref(struct fib_rules_ops *ops)
43{
44 struct list_head *pos;
45 struct fib_rule *rule;
46
47 if (!list_empty(&ops->rules_list)) {
48 pos = ops->rules_list.next;
49 if (pos->next != &ops->rules_list) {
50 rule = list_entry(pos->next, struct fib_rule, list);
51 if (rule->pref)
52 return rule->pref - 1;
53 }
54 }
55
56 return 0;
57}
58EXPORT_SYMBOL(fib_default_rule_pref);
59
41static void notify_rule_change(int event, struct fib_rule *rule, 60static void notify_rule_change(int event, struct fib_rule *rule,
42 struct fib_rules_ops *ops, struct nlmsghdr *nlh, 61 struct fib_rules_ops *ops, struct nlmsghdr *nlh,
43 u32 pid); 62 u32 pid);
@@ -516,6 +535,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
516 return -EMSGSIZE; 535 return -EMSGSIZE;
517 536
518 frh = nlmsg_data(nlh); 537 frh = nlmsg_data(nlh);
538 frh->family = ops->family;
519 frh->table = rule->table; 539 frh->table = rule->table;
520 NLA_PUT_U32(skb, FRA_TABLE, rule->table); 540 NLA_PUT_U32(skb, FRA_TABLE, rule->table);
521 frh->res1 = 0; 541 frh->res1 = 0;
diff --git a/net/core/filter.c b/net/core/filter.c
index d38ef7fd50f0..ff943bed21af 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -25,6 +25,7 @@
25#include <linux/inet.h> 25#include <linux/inet.h>
26#include <linux/netdevice.h> 26#include <linux/netdevice.h>
27#include <linux/if_packet.h> 27#include <linux/if_packet.h>
28#include <linux/gfp.h>
28#include <net/ip.h> 29#include <net/ip.h>
29#include <net/protocol.h> 30#include <net/protocol.h>
30#include <net/netlink.h> 31#include <net/netlink.h>
diff --git a/net/core/flow.c b/net/core/flow.c
index 96015871ecea..161900674009 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -26,113 +26,158 @@
26#include <linux/security.h> 26#include <linux/security.h>
27 27
28struct flow_cache_entry { 28struct flow_cache_entry {
29 struct flow_cache_entry *next; 29 union {
30 u16 family; 30 struct hlist_node hlist;
31 u8 dir; 31 struct list_head gc_list;
32 u32 genid; 32 } u;
33 struct flowi key; 33 u16 family;
34 void *object; 34 u8 dir;
35 atomic_t *object_ref; 35 u32 genid;
36 struct flowi key;
37 struct flow_cache_object *object;
36}; 38};
37 39
38atomic_t flow_cache_genid = ATOMIC_INIT(0); 40struct flow_cache_percpu {
39 41 struct hlist_head *hash_table;
40static u32 flow_hash_shift; 42 int hash_count;
41#define flow_hash_size (1 << flow_hash_shift) 43 u32 hash_rnd;
42static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL }; 44 int hash_rnd_recalc;
43 45 struct tasklet_struct flush_tasklet;
44#define flow_table(cpu) (per_cpu(flow_tables, cpu)) 46};
45
46static struct kmem_cache *flow_cachep __read_mostly;
47 47
48static int flow_lwm, flow_hwm; 48struct flow_flush_info {
49 struct flow_cache *cache;
50 atomic_t cpuleft;
51 struct completion completion;
52};
49 53
50struct flow_percpu_info { 54struct flow_cache {
51 int hash_rnd_recalc; 55 u32 hash_shift;
52 u32 hash_rnd; 56 unsigned long order;
53 int count; 57 struct flow_cache_percpu *percpu;
58 struct notifier_block hotcpu_notifier;
59 int low_watermark;
60 int high_watermark;
61 struct timer_list rnd_timer;
54}; 62};
55static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
56 63
57#define flow_hash_rnd_recalc(cpu) \ 64atomic_t flow_cache_genid = ATOMIC_INIT(0);
58 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc) 65static struct flow_cache flow_cache_global;
59#define flow_hash_rnd(cpu) \ 66static struct kmem_cache *flow_cachep;
60 (per_cpu(flow_hash_info, cpu).hash_rnd)
61#define flow_count(cpu) \
62 (per_cpu(flow_hash_info, cpu).count)
63 67
64static struct timer_list flow_hash_rnd_timer; 68static DEFINE_SPINLOCK(flow_cache_gc_lock);
69static LIST_HEAD(flow_cache_gc_list);
65 70
66#define FLOW_HASH_RND_PERIOD (10 * 60 * HZ) 71#define flow_cache_hash_size(cache) (1 << (cache)->hash_shift)
67 72#define FLOW_HASH_RND_PERIOD (10 * 60 * HZ)
68struct flow_flush_info {
69 atomic_t cpuleft;
70 struct completion completion;
71};
72static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
73
74#define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
75 73
76static void flow_cache_new_hashrnd(unsigned long arg) 74static void flow_cache_new_hashrnd(unsigned long arg)
77{ 75{
76 struct flow_cache *fc = (void *) arg;
78 int i; 77 int i;
79 78
80 for_each_possible_cpu(i) 79 for_each_possible_cpu(i)
81 flow_hash_rnd_recalc(i) = 1; 80 per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;
82 81
83 flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; 82 fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
84 add_timer(&flow_hash_rnd_timer); 83 add_timer(&fc->rnd_timer);
84}
85
86static int flow_entry_valid(struct flow_cache_entry *fle)
87{
88 if (atomic_read(&flow_cache_genid) != fle->genid)
89 return 0;
90 if (fle->object && !fle->object->ops->check(fle->object))
91 return 0;
92 return 1;
85} 93}
86 94
87static void flow_entry_kill(int cpu, struct flow_cache_entry *fle) 95static void flow_entry_kill(struct flow_cache_entry *fle)
88{ 96{
89 if (fle->object) 97 if (fle->object)
90 atomic_dec(fle->object_ref); 98 fle->object->ops->delete(fle->object);
91 kmem_cache_free(flow_cachep, fle); 99 kmem_cache_free(flow_cachep, fle);
92 flow_count(cpu)--;
93} 100}
94 101
95static void __flow_cache_shrink(int cpu, int shrink_to) 102static void flow_cache_gc_task(struct work_struct *work)
96{ 103{
97 struct flow_cache_entry *fle, **flp; 104 struct list_head gc_list;
98 int i; 105 struct flow_cache_entry *fce, *n;
99 106
100 for (i = 0; i < flow_hash_size; i++) { 107 INIT_LIST_HEAD(&gc_list);
101 int k = 0; 108 spin_lock_bh(&flow_cache_gc_lock);
109 list_splice_tail_init(&flow_cache_gc_list, &gc_list);
110 spin_unlock_bh(&flow_cache_gc_lock);
102 111
103 flp = &flow_table(cpu)[i]; 112 list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
104 while ((fle = *flp) != NULL && k < shrink_to) { 113 flow_entry_kill(fce);
105 k++; 114}
106 flp = &fle->next; 115static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);
107 } 116
108 while ((fle = *flp) != NULL) { 117static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
109 *flp = fle->next; 118 int deleted, struct list_head *gc_list)
110 flow_entry_kill(cpu, fle); 119{
111 } 120 if (deleted) {
121 fcp->hash_count -= deleted;
122 spin_lock_bh(&flow_cache_gc_lock);
123 list_splice_tail(gc_list, &flow_cache_gc_list);
124 spin_unlock_bh(&flow_cache_gc_lock);
125 schedule_work(&flow_cache_gc_work);
112 } 126 }
113} 127}
114 128
115static void flow_cache_shrink(int cpu) 129static void __flow_cache_shrink(struct flow_cache *fc,
130 struct flow_cache_percpu *fcp,
131 int shrink_to)
116{ 132{
117 int shrink_to = flow_lwm / flow_hash_size; 133 struct flow_cache_entry *fle;
134 struct hlist_node *entry, *tmp;
135 LIST_HEAD(gc_list);
136 int i, deleted = 0;
137
138 for (i = 0; i < flow_cache_hash_size(fc); i++) {
139 int saved = 0;
140
141 hlist_for_each_entry_safe(fle, entry, tmp,
142 &fcp->hash_table[i], u.hlist) {
143 if (saved < shrink_to &&
144 flow_entry_valid(fle)) {
145 saved++;
146 } else {
147 deleted++;
148 hlist_del(&fle->u.hlist);
149 list_add_tail(&fle->u.gc_list, &gc_list);
150 }
151 }
152 }
118 153
119 __flow_cache_shrink(cpu, shrink_to); 154 flow_cache_queue_garbage(fcp, deleted, &gc_list);
120} 155}
121 156
122static void flow_new_hash_rnd(int cpu) 157static void flow_cache_shrink(struct flow_cache *fc,
158 struct flow_cache_percpu *fcp)
123{ 159{
124 get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32)); 160 int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);
125 flow_hash_rnd_recalc(cpu) = 0;
126 161
127 __flow_cache_shrink(cpu, 0); 162 __flow_cache_shrink(fc, fcp, shrink_to);
128} 163}
129 164
130static u32 flow_hash_code(struct flowi *key, int cpu) 165static void flow_new_hash_rnd(struct flow_cache *fc,
166 struct flow_cache_percpu *fcp)
167{
168 get_random_bytes(&fcp->hash_rnd, sizeof(u32));
169 fcp->hash_rnd_recalc = 0;
170 __flow_cache_shrink(fc, fcp, 0);
171}
172
173static u32 flow_hash_code(struct flow_cache *fc,
174 struct flow_cache_percpu *fcp,
175 struct flowi *key)
131{ 176{
132 u32 *k = (u32 *) key; 177 u32 *k = (u32 *) key;
133 178
134 return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) & 179 return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
135 (flow_hash_size - 1)); 180 & (flow_cache_hash_size(fc) - 1));
136} 181}
137 182
138#if (BITS_PER_LONG == 64) 183#if (BITS_PER_LONG == 64)
@@ -165,114 +210,117 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
165 return 0; 210 return 0;
166} 211}
167 212
168void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir, 213struct flow_cache_object *
169 flow_resolve_t resolver) 214flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
215 flow_resolve_t resolver, void *ctx)
170{ 216{
171 struct flow_cache_entry *fle, **head; 217 struct flow_cache *fc = &flow_cache_global;
218 struct flow_cache_percpu *fcp;
219 struct flow_cache_entry *fle, *tfle;
220 struct hlist_node *entry;
221 struct flow_cache_object *flo;
172 unsigned int hash; 222 unsigned int hash;
173 int cpu;
174 223
175 local_bh_disable(); 224 local_bh_disable();
176 cpu = smp_processor_id(); 225 fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
177 226
178 fle = NULL; 227 fle = NULL;
228 flo = NULL;
179 /* Packet really early in init? Making flow_cache_init a 229 /* Packet really early in init? Making flow_cache_init a
180 * pre-smp initcall would solve this. --RR */ 230 * pre-smp initcall would solve this. --RR */
181 if (!flow_table(cpu)) 231 if (!fcp->hash_table)
182 goto nocache; 232 goto nocache;
183 233
184 if (flow_hash_rnd_recalc(cpu)) 234 if (fcp->hash_rnd_recalc)
185 flow_new_hash_rnd(cpu); 235 flow_new_hash_rnd(fc, fcp);
186 hash = flow_hash_code(key, cpu);
187 236
188 head = &flow_table(cpu)[hash]; 237 hash = flow_hash_code(fc, fcp, key);
189 for (fle = *head; fle; fle = fle->next) { 238 hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
190 if (fle->family == family && 239 if (tfle->family == family &&
191 fle->dir == dir && 240 tfle->dir == dir &&
192 flow_key_compare(key, &fle->key) == 0) { 241 flow_key_compare(key, &tfle->key) == 0) {
193 if (fle->genid == atomic_read(&flow_cache_genid)) { 242 fle = tfle;
194 void *ret = fle->object;
195
196 if (ret)
197 atomic_inc(fle->object_ref);
198 local_bh_enable();
199
200 return ret;
201 }
202 break; 243 break;
203 } 244 }
204 } 245 }
205 246
206 if (!fle) { 247 if (unlikely(!fle)) {
207 if (flow_count(cpu) > flow_hwm) 248 if (fcp->hash_count > fc->high_watermark)
208 flow_cache_shrink(cpu); 249 flow_cache_shrink(fc, fcp);
209 250
210 fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC); 251 fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
211 if (fle) { 252 if (fle) {
212 fle->next = *head;
213 *head = fle;
214 fle->family = family; 253 fle->family = family;
215 fle->dir = dir; 254 fle->dir = dir;
216 memcpy(&fle->key, key, sizeof(*key)); 255 memcpy(&fle->key, key, sizeof(*key));
217 fle->object = NULL; 256 fle->object = NULL;
218 flow_count(cpu)++; 257 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
258 fcp->hash_count++;
219 } 259 }
260 } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
261 flo = fle->object;
262 if (!flo)
263 goto ret_object;
264 flo = flo->ops->get(flo);
265 if (flo)
266 goto ret_object;
267 } else if (fle->object) {
268 flo = fle->object;
269 flo->ops->delete(flo);
270 fle->object = NULL;
220 } 271 }
221 272
222nocache: 273nocache:
223 { 274 flo = NULL;
224 int err; 275 if (fle) {
225 void *obj; 276 flo = fle->object;
226 atomic_t *obj_ref; 277 fle->object = NULL;
227
228 err = resolver(net, key, family, dir, &obj, &obj_ref);
229
230 if (fle && !err) {
231 fle->genid = atomic_read(&flow_cache_genid);
232
233 if (fle->object)
234 atomic_dec(fle->object_ref);
235
236 fle->object = obj;
237 fle->object_ref = obj_ref;
238 if (obj)
239 atomic_inc(fle->object_ref);
240 }
241 local_bh_enable();
242
243 if (err)
244 obj = ERR_PTR(err);
245 return obj;
246 } 278 }
279 flo = resolver(net, key, family, dir, flo, ctx);
280 if (fle) {
281 fle->genid = atomic_read(&flow_cache_genid);
282 if (!IS_ERR(flo))
283 fle->object = flo;
284 else
285 fle->genid--;
286 } else {
287 if (flo && !IS_ERR(flo))
288 flo->ops->delete(flo);
289 }
290ret_object:
291 local_bh_enable();
292 return flo;
247} 293}
248 294
249static void flow_cache_flush_tasklet(unsigned long data) 295static void flow_cache_flush_tasklet(unsigned long data)
250{ 296{
251 struct flow_flush_info *info = (void *)data; 297 struct flow_flush_info *info = (void *)data;
252 int i; 298 struct flow_cache *fc = info->cache;
253 int cpu; 299 struct flow_cache_percpu *fcp;
254 300 struct flow_cache_entry *fle;
255 cpu = smp_processor_id(); 301 struct hlist_node *entry, *tmp;
256 for (i = 0; i < flow_hash_size; i++) { 302 LIST_HEAD(gc_list);
257 struct flow_cache_entry *fle; 303 int i, deleted = 0;
258 304
259 fle = flow_table(cpu)[i]; 305 fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
260 for (; fle; fle = fle->next) { 306 for (i = 0; i < flow_cache_hash_size(fc); i++) {
261 unsigned genid = atomic_read(&flow_cache_genid); 307 hlist_for_each_entry_safe(fle, entry, tmp,
262 308 &fcp->hash_table[i], u.hlist) {
263 if (!fle->object || fle->genid == genid) 309 if (flow_entry_valid(fle))
264 continue; 310 continue;
265 311
266 fle->object = NULL; 312 deleted++;
267 atomic_dec(fle->object_ref); 313 hlist_del(&fle->u.hlist);
314 list_add_tail(&fle->u.gc_list, &gc_list);
268 } 315 }
269 } 316 }
270 317
318 flow_cache_queue_garbage(fcp, deleted, &gc_list);
319
271 if (atomic_dec_and_test(&info->cpuleft)) 320 if (atomic_dec_and_test(&info->cpuleft))
272 complete(&info->completion); 321 complete(&info->completion);
273} 322}
274 323
275static void flow_cache_flush_per_cpu(void *) __attribute__((__unused__));
276static void flow_cache_flush_per_cpu(void *data) 324static void flow_cache_flush_per_cpu(void *data)
277{ 325{
278 struct flow_flush_info *info = data; 326 struct flow_flush_info *info = data;
@@ -280,8 +328,7 @@ static void flow_cache_flush_per_cpu(void *data)
280 struct tasklet_struct *tasklet; 328 struct tasklet_struct *tasklet;
281 329
282 cpu = smp_processor_id(); 330 cpu = smp_processor_id();
283 331 tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
284 tasklet = flow_flush_tasklet(cpu);
285 tasklet->data = (unsigned long)info; 332 tasklet->data = (unsigned long)info;
286 tasklet_schedule(tasklet); 333 tasklet_schedule(tasklet);
287} 334}
@@ -294,6 +341,7 @@ void flow_cache_flush(void)
294 /* Don't want cpus going down or up during this. */ 341 /* Don't want cpus going down or up during this. */
295 get_online_cpus(); 342 get_online_cpus();
296 mutex_lock(&flow_flush_sem); 343 mutex_lock(&flow_flush_sem);
344 info.cache = &flow_cache_global;
297 atomic_set(&info.cpuleft, num_online_cpus()); 345 atomic_set(&info.cpuleft, num_online_cpus());
298 init_completion(&info.completion); 346 init_completion(&info.completion);
299 347
@@ -307,62 +355,75 @@ void flow_cache_flush(void)
307 put_online_cpus(); 355 put_online_cpus();
308} 356}
309 357
310static void __init flow_cache_cpu_prepare(int cpu) 358static void __init flow_cache_cpu_prepare(struct flow_cache *fc,
359 struct flow_cache_percpu *fcp)
311{ 360{
312 struct tasklet_struct *tasklet; 361 fcp->hash_table = (struct hlist_head *)
313 unsigned long order; 362 __get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order);
314 363 if (!fcp->hash_table)
315 for (order = 0; 364 panic("NET: failed to allocate flow cache order %lu\n", fc->order);
316 (PAGE_SIZE << order) < 365
317 (sizeof(struct flow_cache_entry *)*flow_hash_size); 366 fcp->hash_rnd_recalc = 1;
318 order++) 367 fcp->hash_count = 0;
319 /* NOTHING */; 368 tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
320
321 flow_table(cpu) = (struct flow_cache_entry **)
322 __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
323 if (!flow_table(cpu))
324 panic("NET: failed to allocate flow cache order %lu\n", order);
325
326 flow_hash_rnd_recalc(cpu) = 1;
327 flow_count(cpu) = 0;
328
329 tasklet = flow_flush_tasklet(cpu);
330 tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
331} 369}
332 370
333static int flow_cache_cpu(struct notifier_block *nfb, 371static int flow_cache_cpu(struct notifier_block *nfb,
334 unsigned long action, 372 unsigned long action,
335 void *hcpu) 373 void *hcpu)
336{ 374{
375 struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
376 int cpu = (unsigned long) hcpu;
377 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
378
337 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) 379 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
338 __flow_cache_shrink((unsigned long)hcpu, 0); 380 __flow_cache_shrink(fc, fcp, 0);
339 return NOTIFY_OK; 381 return NOTIFY_OK;
340} 382}
341 383
342static int __init flow_cache_init(void) 384static int flow_cache_init(struct flow_cache *fc)
343{ 385{
386 unsigned long order;
344 int i; 387 int i;
345 388
346 flow_cachep = kmem_cache_create("flow_cache", 389 fc->hash_shift = 10;
347 sizeof(struct flow_cache_entry), 390 fc->low_watermark = 2 * flow_cache_hash_size(fc);
348 0, SLAB_PANIC, 391 fc->high_watermark = 4 * flow_cache_hash_size(fc);
349 NULL); 392
350 flow_hash_shift = 10; 393 for (order = 0;
351 flow_lwm = 2 * flow_hash_size; 394 (PAGE_SIZE << order) <
352 flow_hwm = 4 * flow_hash_size; 395 (sizeof(struct hlist_head)*flow_cache_hash_size(fc));
396 order++)
397 /* NOTHING */;
398 fc->order = order;
399 fc->percpu = alloc_percpu(struct flow_cache_percpu);
353 400
354 setup_timer(&flow_hash_rnd_timer, flow_cache_new_hashrnd, 0); 401 setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
355 flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; 402 (unsigned long) fc);
356 add_timer(&flow_hash_rnd_timer); 403 fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
404 add_timer(&fc->rnd_timer);
357 405
358 for_each_possible_cpu(i) 406 for_each_possible_cpu(i)
359 flow_cache_cpu_prepare(i); 407 flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i));
408
409 fc->hotcpu_notifier = (struct notifier_block){
410 .notifier_call = flow_cache_cpu,
411 };
412 register_hotcpu_notifier(&fc->hotcpu_notifier);
360 413
361 hotcpu_notifier(flow_cache_cpu, 0);
362 return 0; 414 return 0;
363} 415}
364 416
365module_init(flow_cache_init); 417static int __init flow_cache_init_global(void)
418{
419 flow_cachep = kmem_cache_create("flow_cache",
420 sizeof(struct flow_cache_entry),
421 0, SLAB_PANIC, NULL);
422
423 return flow_cache_init(&flow_cache_global);
424}
425
426module_init(flow_cache_init_global);
366 427
367EXPORT_SYMBOL(flow_cache_genid); 428EXPORT_SYMBOL(flow_cache_genid);
368EXPORT_SYMBOL(flow_cache_lookup); 429EXPORT_SYMBOL(flow_cache_lookup);
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 493775f4f2f1..cf8e70392fe0 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -32,6 +32,7 @@
32#include <linux/rtnetlink.h> 32#include <linux/rtnetlink.h>
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/rbtree.h> 34#include <linux/rbtree.h>
35#include <linux/slab.h>
35#include <net/sock.h> 36#include <net/sock.h>
36#include <net/gen_stats.h> 37#include <net/gen_stats.h>
37 38
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 16ad45d4882b..1e7f4e91a935 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -20,7 +20,6 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/slab.h>
24#include <linux/net.h> 23#include <linux/net.h>
25#include <linux/in6.h> 24#include <linux/in6.h>
26#include <asm/uaccess.h> 25#include <asm/uaccess.h>
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 5910b555a54a..bdbce2f5875b 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -19,7 +19,6 @@
19#include <linux/rtnetlink.h> 19#include <linux/rtnetlink.h>
20#include <linux/jiffies.h> 20#include <linux/jiffies.h>
21#include <linux/spinlock.h> 21#include <linux/spinlock.h>
22#include <linux/slab.h>
23#include <linux/workqueue.h> 22#include <linux/workqueue.h>
24#include <linux/bitops.h> 23#include <linux/bitops.h>
25#include <asm/types.h> 24#include <asm/types.h>
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 6cee6434da67..bff37908bd55 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -15,6 +15,7 @@
15 * Harald Welte Add neighbour cache statistics like rtstat 15 * Harald Welte Add neighbour cache statistics like rtstat
16 */ 16 */
17 17
18#include <linux/slab.h>
18#include <linux/types.h> 19#include <linux/types.h>
19#include <linux/kernel.h> 20#include <linux/kernel.h>
20#include <linux/module.h> 21#include <linux/module.h>
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 7a46343d5ae3..c57c4b228bb5 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -13,9 +13,11 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/if_arp.h> 15#include <linux/if_arp.h>
16#include <linux/slab.h>
16#include <net/sock.h> 17#include <net/sock.h>
17#include <linux/rtnetlink.h> 18#include <linux/rtnetlink.h>
18#include <linux/wireless.h> 19#include <linux/wireless.h>
20#include <linux/vmalloc.h>
19#include <net/wext.h> 21#include <net/wext.h>
20 22
21#include "net-sysfs.h" 23#include "net-sysfs.h"
@@ -466,6 +468,7 @@ static struct attribute_group wireless_group = {
466}; 468};
467#endif 469#endif
468 470
471#ifdef CONFIG_RPS
469/* 472/*
470 * RX queue sysfs structures and functions. 473 * RX queue sysfs structures and functions.
471 */ 474 */
@@ -547,7 +550,7 @@ static void rps_map_release(struct rcu_head *rcu)
547 kfree(map); 550 kfree(map);
548} 551}
549 552
550ssize_t store_rps_map(struct netdev_rx_queue *queue, 553static ssize_t store_rps_map(struct netdev_rx_queue *queue,
551 struct rx_queue_attribute *attribute, 554 struct rx_queue_attribute *attribute,
552 const char *buf, size_t len) 555 const char *buf, size_t len)
553{ 556{
@@ -599,22 +602,109 @@ ssize_t store_rps_map(struct netdev_rx_queue *queue,
599 return len; 602 return len;
600} 603}
601 604
605static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
606 struct rx_queue_attribute *attr,
607 char *buf)
608{
609 struct rps_dev_flow_table *flow_table;
610 unsigned int val = 0;
611
612 rcu_read_lock();
613 flow_table = rcu_dereference(queue->rps_flow_table);
614 if (flow_table)
615 val = flow_table->mask + 1;
616 rcu_read_unlock();
617
618 return sprintf(buf, "%u\n", val);
619}
620
621static void rps_dev_flow_table_release_work(struct work_struct *work)
622{
623 struct rps_dev_flow_table *table = container_of(work,
624 struct rps_dev_flow_table, free_work);
625
626 vfree(table);
627}
628
629static void rps_dev_flow_table_release(struct rcu_head *rcu)
630{
631 struct rps_dev_flow_table *table = container_of(rcu,
632 struct rps_dev_flow_table, rcu);
633
634 INIT_WORK(&table->free_work, rps_dev_flow_table_release_work);
635 schedule_work(&table->free_work);
636}
637
638static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
639 struct rx_queue_attribute *attr,
640 const char *buf, size_t len)
641{
642 unsigned int count;
643 char *endp;
644 struct rps_dev_flow_table *table, *old_table;
645 static DEFINE_SPINLOCK(rps_dev_flow_lock);
646
647 if (!capable(CAP_NET_ADMIN))
648 return -EPERM;
649
650 count = simple_strtoul(buf, &endp, 0);
651 if (endp == buf)
652 return -EINVAL;
653
654 if (count) {
655 int i;
656
657 if (count > 1<<30) {
658 /* Enforce a limit to prevent overflow */
659 return -EINVAL;
660 }
661 count = roundup_pow_of_two(count);
662 table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
663 if (!table)
664 return -ENOMEM;
665
666 table->mask = count - 1;
667 for (i = 0; i < count; i++)
668 table->flows[i].cpu = RPS_NO_CPU;
669 } else
670 table = NULL;
671
672 spin_lock(&rps_dev_flow_lock);
673 old_table = queue->rps_flow_table;
674 rcu_assign_pointer(queue->rps_flow_table, table);
675 spin_unlock(&rps_dev_flow_lock);
676
677 if (old_table)
678 call_rcu(&old_table->rcu, rps_dev_flow_table_release);
679
680 return len;
681}
682
602static struct rx_queue_attribute rps_cpus_attribute = 683static struct rx_queue_attribute rps_cpus_attribute =
603 __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map); 684 __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);
604 685
686
687static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
688 __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
689 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
690
605static struct attribute *rx_queue_default_attrs[] = { 691static struct attribute *rx_queue_default_attrs[] = {
606 &rps_cpus_attribute.attr, 692 &rps_cpus_attribute.attr,
693 &rps_dev_flow_table_cnt_attribute.attr,
607 NULL 694 NULL
608}; 695};
609 696
610static void rx_queue_release(struct kobject *kobj) 697static void rx_queue_release(struct kobject *kobj)
611{ 698{
612 struct netdev_rx_queue *queue = to_rx_queue(kobj); 699 struct netdev_rx_queue *queue = to_rx_queue(kobj);
613 struct rps_map *map = queue->rps_map;
614 struct netdev_rx_queue *first = queue->first; 700 struct netdev_rx_queue *first = queue->first;
615 701
616 if (map) 702 if (queue->rps_map)
617 call_rcu(&map->rcu, rps_map_release); 703 call_rcu(&queue->rps_map->rcu, rps_map_release);
704
705 if (queue->rps_flow_table)
706 call_rcu(&queue->rps_flow_table->rcu,
707 rps_dev_flow_table_release);
618 708
619 if (atomic_dec_and_test(&first->count)) 709 if (atomic_dec_and_test(&first->count))
620 kfree(first); 710 kfree(first);
@@ -675,7 +765,7 @@ static void rx_queue_remove_kobjects(struct net_device *net)
675 kobject_put(&net->_rx[i].kobj); 765 kobject_put(&net->_rx[i].kobj);
676 kset_unregister(net->queues_kset); 766 kset_unregister(net->queues_kset);
677} 767}
678 768#endif /* CONFIG_RPS */
679#endif /* CONFIG_SYSFS */ 769#endif /* CONFIG_SYSFS */
680 770
681#ifdef CONFIG_HOTPLUG 771#ifdef CONFIG_HOTPLUG
@@ -739,7 +829,9 @@ void netdev_unregister_kobject(struct net_device * net)
739 if (!net_eq(dev_net(net), &init_net)) 829 if (!net_eq(dev_net(net), &init_net))
740 return; 830 return;
741 831
832#ifdef CONFIG_RPS
742 rx_queue_remove_kobjects(net); 833 rx_queue_remove_kobjects(net);
834#endif
743 835
744 device_del(dev); 836 device_del(dev);
745} 837}
@@ -780,11 +872,13 @@ int netdev_register_kobject(struct net_device *net)
780 if (error) 872 if (error)
781 return error; 873 return error;
782 874
875#ifdef CONFIG_RPS
783 error = rx_queue_register_kobjects(net); 876 error = rx_queue_register_kobjects(net);
784 if (error) { 877 if (error) {
785 device_del(dev); 878 device_del(dev);
786 return error; 879 return error;
787 } 880 }
881#endif
788 882
789 return error; 883 return error;
790} 884}
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index f1e982c508bb..afa6380ed88a 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -19,6 +19,7 @@
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/netlink.h> 20#include <linux/netlink.h>
21#include <linux/net_dropmon.h> 21#include <linux/net_dropmon.h>
22#include <linux/slab.h>
22 23
23#include <asm/unaligned.h> 24#include <asm/unaligned.h>
24#include <asm/bitops.h> 25#include <asm/bitops.h>
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index d4ec38fa64e6..a58f59b97597 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -22,6 +22,7 @@
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/rcupdate.h> 23#include <linux/rcupdate.h>
24#include <linux/workqueue.h> 24#include <linux/workqueue.h>
25#include <linux/slab.h>
25#include <net/tcp.h> 26#include <net/tcp.h>
26#include <net/udp.h> 27#include <net/udp.h>
27#include <asm/unaligned.h> 28#include <asm/unaligned.h>
@@ -614,7 +615,7 @@ void netpoll_print_options(struct netpoll *np)
614 np->name, np->local_port); 615 np->name, np->local_port);
615 printk(KERN_INFO "%s: local IP %pI4\n", 616 printk(KERN_INFO "%s: local IP %pI4\n",
616 np->name, &np->local_ip); 617 np->name, &np->local_ip);
617 printk(KERN_INFO "%s: interface %s\n", 618 printk(KERN_INFO "%s: interface '%s'\n",
618 np->name, np->dev_name); 619 np->name, np->dev_name);
619 printk(KERN_INFO "%s: remote port %d\n", 620 printk(KERN_INFO "%s: remote port %d\n",
620 np->name, np->remote_port); 621 np->name, np->remote_port);
@@ -661,6 +662,9 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
661 if ((delim = strchr(cur, '@')) == NULL) 662 if ((delim = strchr(cur, '@')) == NULL)
662 goto parse_failed; 663 goto parse_failed;
663 *delim = 0; 664 *delim = 0;
665 if (*cur == ' ' || *cur == '\t')
666 printk(KERN_INFO "%s: warning: whitespace"
667 "is not allowed\n", np->name);
664 np->remote_port = simple_strtol(cur, NULL, 10); 668 np->remote_port = simple_strtol(cur, NULL, 10);
665 cur = delim; 669 cur = delim;
666 } 670 }
@@ -708,7 +712,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
708 return 0; 712 return 0;
709 713
710 parse_failed: 714 parse_failed:
711 printk(KERN_INFO "%s: couldn't parse config at %s!\n", 715 printk(KERN_INFO "%s: couldn't parse config at '%s'!\n",
712 np->name, cur); 716 np->name, cur);
713 return -1; 717 return -1;
714} 718}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 43923811bd6a..2ad68da418df 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -169,7 +169,7 @@
169#include <asm/dma.h> 169#include <asm/dma.h>
170#include <asm/div64.h> /* do_div */ 170#include <asm/div64.h> /* do_div */
171 171
172#define VERSION "2.72" 172#define VERSION "2.73"
173#define IP_NAME_SZ 32 173#define IP_NAME_SZ 32
174#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ 174#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
175#define MPLS_STACK_BOTTOM htonl(0x00000100) 175#define MPLS_STACK_BOTTOM htonl(0x00000100)
@@ -190,6 +190,7 @@
190#define F_IPSEC_ON (1<<12) /* ipsec on for flows */ 190#define F_IPSEC_ON (1<<12) /* ipsec on for flows */
191#define F_QUEUE_MAP_RND (1<<13) /* queue map Random */ 191#define F_QUEUE_MAP_RND (1<<13) /* queue map Random */
192#define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */ 192#define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */
193#define F_NODE (1<<15) /* Node memory alloc*/
193 194
194/* Thread control flag bits */ 195/* Thread control flag bits */
195#define T_STOP (1<<0) /* Stop run */ 196#define T_STOP (1<<0) /* Stop run */
@@ -372,6 +373,7 @@ struct pktgen_dev {
372 373
373 u16 queue_map_min; 374 u16 queue_map_min;
374 u16 queue_map_max; 375 u16 queue_map_max;
376 int node; /* Memory node */
375 377
376#ifdef CONFIG_XFRM 378#ifdef CONFIG_XFRM
377 __u8 ipsmode; /* IPSEC mode (config) */ 379 __u8 ipsmode; /* IPSEC mode (config) */
@@ -607,6 +609,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
607 if (pkt_dev->traffic_class) 609 if (pkt_dev->traffic_class)
608 seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class); 610 seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class);
609 611
612 if (pkt_dev->node >= 0)
613 seq_printf(seq, " node: %d\n", pkt_dev->node);
614
610 seq_printf(seq, " Flags: "); 615 seq_printf(seq, " Flags: ");
611 616
612 if (pkt_dev->flags & F_IPV6) 617 if (pkt_dev->flags & F_IPV6)
@@ -660,6 +665,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
660 if (pkt_dev->flags & F_SVID_RND) 665 if (pkt_dev->flags & F_SVID_RND)
661 seq_printf(seq, "SVID_RND "); 666 seq_printf(seq, "SVID_RND ");
662 667
668 if (pkt_dev->flags & F_NODE)
669 seq_printf(seq, "NODE_ALLOC ");
670
663 seq_puts(seq, "\n"); 671 seq_puts(seq, "\n");
664 672
665 /* not really stopped, more like last-running-at */ 673 /* not really stopped, more like last-running-at */
@@ -1074,6 +1082,21 @@ static ssize_t pktgen_if_write(struct file *file,
1074 pkt_dev->dst_mac_count); 1082 pkt_dev->dst_mac_count);
1075 return count; 1083 return count;
1076 } 1084 }
1085 if (!strcmp(name, "node")) {
1086 len = num_arg(&user_buffer[i], 10, &value);
1087 if (len < 0)
1088 return len;
1089
1090 i += len;
1091
1092 if (node_possible(value)) {
1093 pkt_dev->node = value;
1094 sprintf(pg_result, "OK: node=%d", pkt_dev->node);
1095 }
1096 else
1097 sprintf(pg_result, "ERROR: node not possible");
1098 return count;
1099 }
1077 if (!strcmp(name, "flag")) { 1100 if (!strcmp(name, "flag")) {
1078 char f[32]; 1101 char f[32];
1079 memset(f, 0, 32); 1102 memset(f, 0, 32);
@@ -1166,12 +1189,18 @@ static ssize_t pktgen_if_write(struct file *file,
1166 else if (strcmp(f, "!IPV6") == 0) 1189 else if (strcmp(f, "!IPV6") == 0)
1167 pkt_dev->flags &= ~F_IPV6; 1190 pkt_dev->flags &= ~F_IPV6;
1168 1191
1192 else if (strcmp(f, "NODE_ALLOC") == 0)
1193 pkt_dev->flags |= F_NODE;
1194
1195 else if (strcmp(f, "!NODE_ALLOC") == 0)
1196 pkt_dev->flags &= ~F_NODE;
1197
1169 else { 1198 else {
1170 sprintf(pg_result, 1199 sprintf(pg_result,
1171 "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s", 1200 "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
1172 f, 1201 f,
1173 "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, " 1202 "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, "
1174 "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC\n"); 1203 "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC, NODE_ALLOC\n");
1175 return count; 1204 return count;
1176 } 1205 }
1177 sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags); 1206 sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags);
@@ -2572,9 +2601,27 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2572 mod_cur_headers(pkt_dev); 2601 mod_cur_headers(pkt_dev);
2573 2602
2574 datalen = (odev->hard_header_len + 16) & ~0xf; 2603 datalen = (odev->hard_header_len + 16) & ~0xf;
2575 skb = __netdev_alloc_skb(odev, 2604
2576 pkt_dev->cur_pkt_size + 64 2605 if (pkt_dev->flags & F_NODE) {
2577 + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT); 2606 int node;
2607
2608 if (pkt_dev->node >= 0)
2609 node = pkt_dev->node;
2610 else
2611 node = numa_node_id();
2612
2613 skb = __alloc_skb(NET_SKB_PAD + pkt_dev->cur_pkt_size + 64
2614 + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT, 0, node);
2615 if (likely(skb)) {
2616 skb_reserve(skb, NET_SKB_PAD);
2617 skb->dev = odev;
2618 }
2619 }
2620 else
2621 skb = __netdev_alloc_skb(odev,
2622 pkt_dev->cur_pkt_size + 64
2623 + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT);
2624
2578 if (!skb) { 2625 if (!skb) {
2579 sprintf(pkt_dev->result, "No memory"); 2626 sprintf(pkt_dev->result, "No memory");
2580 return NULL; 2627 return NULL;
@@ -3674,6 +3721,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3674 pkt_dev->svlan_p = 0; 3721 pkt_dev->svlan_p = 0;
3675 pkt_dev->svlan_cfi = 0; 3722 pkt_dev->svlan_cfi = 0;
3676 pkt_dev->svlan_id = 0xffff; 3723 pkt_dev->svlan_id = 0xffff;
3724 pkt_dev->node = -1;
3677 3725
3678 err = pktgen_setup_dev(pkt_dev, ifname); 3726 err = pktgen_setup_dev(pkt_dev, ifname);
3679 if (err) 3727 if (err)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index e1121f0bca6a..78c85985cb30 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -118,7 +118,11 @@ static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex)
118{ 118{
119 struct rtnl_link *tab; 119 struct rtnl_link *tab;
120 120
121 tab = rtnl_msg_handlers[protocol]; 121 if (protocol < NPROTO)
122 tab = rtnl_msg_handlers[protocol];
123 else
124 tab = NULL;
125
122 if (tab == NULL || tab[msgindex].doit == NULL) 126 if (tab == NULL || tab[msgindex].doit == NULL)
123 tab = rtnl_msg_handlers[PF_UNSPEC]; 127 tab = rtnl_msg_handlers[PF_UNSPEC];
124 128
@@ -129,7 +133,11 @@ static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
129{ 133{
130 struct rtnl_link *tab; 134 struct rtnl_link *tab;
131 135
132 tab = rtnl_msg_handlers[protocol]; 136 if (protocol < NPROTO)
137 tab = rtnl_msg_handlers[protocol];
138 else
139 tab = NULL;
140
133 if (tab == NULL || tab[msgindex].dumpit == NULL) 141 if (tab == NULL || tab[msgindex].dumpit == NULL)
134 tab = rtnl_msg_handlers[PF_UNSPEC]; 142 tab = rtnl_msg_handlers[PF_UNSPEC];
135 143
@@ -602,36 +610,38 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
602 a->tx_compressed = b->tx_compressed; 610 a->tx_compressed = b->tx_compressed;
603} 611}
604 612
605static void copy_rtnl_link_stats64(struct rtnl_link_stats64 *a, 613static void copy_rtnl_link_stats64(void *v, const struct net_device_stats *b)
606 const struct net_device_stats *b)
607{ 614{
608 a->rx_packets = b->rx_packets; 615 struct rtnl_link_stats64 a;
609 a->tx_packets = b->tx_packets; 616
610 a->rx_bytes = b->rx_bytes; 617 a.rx_packets = b->rx_packets;
611 a->tx_bytes = b->tx_bytes; 618 a.tx_packets = b->tx_packets;
612 a->rx_errors = b->rx_errors; 619 a.rx_bytes = b->rx_bytes;
613 a->tx_errors = b->tx_errors; 620 a.tx_bytes = b->tx_bytes;
614 a->rx_dropped = b->rx_dropped; 621 a.rx_errors = b->rx_errors;
615 a->tx_dropped = b->tx_dropped; 622 a.tx_errors = b->tx_errors;
616 623 a.rx_dropped = b->rx_dropped;
617 a->multicast = b->multicast; 624 a.tx_dropped = b->tx_dropped;
618 a->collisions = b->collisions; 625
619 626 a.multicast = b->multicast;
620 a->rx_length_errors = b->rx_length_errors; 627 a.collisions = b->collisions;
621 a->rx_over_errors = b->rx_over_errors; 628
622 a->rx_crc_errors = b->rx_crc_errors; 629 a.rx_length_errors = b->rx_length_errors;
623 a->rx_frame_errors = b->rx_frame_errors; 630 a.rx_over_errors = b->rx_over_errors;
624 a->rx_fifo_errors = b->rx_fifo_errors; 631 a.rx_crc_errors = b->rx_crc_errors;
625 a->rx_missed_errors = b->rx_missed_errors; 632 a.rx_frame_errors = b->rx_frame_errors;
626 633 a.rx_fifo_errors = b->rx_fifo_errors;
627 a->tx_aborted_errors = b->tx_aborted_errors; 634 a.rx_missed_errors = b->rx_missed_errors;
628 a->tx_carrier_errors = b->tx_carrier_errors; 635
629 a->tx_fifo_errors = b->tx_fifo_errors; 636 a.tx_aborted_errors = b->tx_aborted_errors;
630 a->tx_heartbeat_errors = b->tx_heartbeat_errors; 637 a.tx_carrier_errors = b->tx_carrier_errors;
631 a->tx_window_errors = b->tx_window_errors; 638 a.tx_fifo_errors = b->tx_fifo_errors;
632 639 a.tx_heartbeat_errors = b->tx_heartbeat_errors;
633 a->rx_compressed = b->rx_compressed; 640 a.tx_window_errors = b->tx_window_errors;
634 a->tx_compressed = b->tx_compressed; 641
642 a.rx_compressed = b->rx_compressed;
643 a.tx_compressed = b->tx_compressed;
644 memcpy(v, &a, sizeof(a));
635} 645}
636 646
637static inline int rtnl_vfinfo_size(const struct net_device *dev) 647static inline int rtnl_vfinfo_size(const struct net_device *dev)
@@ -651,6 +661,7 @@ static inline size_t if_nlmsg_size(const struct net_device *dev)
651 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ 661 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
652 + nla_total_size(sizeof(struct rtnl_link_ifmap)) 662 + nla_total_size(sizeof(struct rtnl_link_ifmap))
653 + nla_total_size(sizeof(struct rtnl_link_stats)) 663 + nla_total_size(sizeof(struct rtnl_link_stats))
664 + nla_total_size(sizeof(struct rtnl_link_stats64))
654 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 665 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
655 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */ 666 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
656 + nla_total_size(4) /* IFLA_TXQLEN */ 667 + nla_total_size(4) /* IFLA_TXQLEN */
@@ -734,8 +745,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
734 sizeof(struct rtnl_link_stats64)); 745 sizeof(struct rtnl_link_stats64));
735 if (attr == NULL) 746 if (attr == NULL)
736 goto nla_put_failure; 747 goto nla_put_failure;
737
738 stats = dev_get_stats(dev);
739 copy_rtnl_link_stats64(nla_data(attr), stats); 748 copy_rtnl_link_stats64(nla_data(attr), stats);
740 749
741 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) { 750 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
@@ -1443,9 +1452,6 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1443 return 0; 1452 return 0;
1444 1453
1445 family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family; 1454 family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family;
1446 if (family >= NPROTO)
1447 return -EAFNOSUPPORT;
1448
1449 sz_idx = type>>2; 1455 sz_idx = type>>2;
1450 kind = type&3; 1456 kind = type&3;
1451 1457
@@ -1513,6 +1519,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
1513 case NETDEV_POST_INIT: 1519 case NETDEV_POST_INIT:
1514 case NETDEV_REGISTER: 1520 case NETDEV_REGISTER:
1515 case NETDEV_CHANGE: 1521 case NETDEV_CHANGE:
1522 case NETDEV_PRE_TYPE_CHANGE:
1516 case NETDEV_GOING_DOWN: 1523 case NETDEV_GOING_DOWN:
1517 case NETDEV_UNREGISTER: 1524 case NETDEV_UNREGISTER:
1518 case NETDEV_UNREGISTER_BATCH: 1525 case NETDEV_UNREGISTER_BATCH:
diff --git a/net/core/scm.c b/net/core/scm.c
index 9b264634acfd..b88f6f9d0b97 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -26,6 +26,7 @@
26#include <linux/security.h> 26#include <linux/security.h>
27#include <linux/pid.h> 27#include <linux/pid.h>
28#include <linux/nsproxy.h> 28#include <linux/nsproxy.h>
29#include <linux/slab.h>
29 30
30#include <asm/system.h> 31#include <asm/system.h>
31#include <asm/uaccess.h> 32#include <asm/uaccess.h>
diff --git a/net/core/sock.c b/net/core/sock.c
index c5812bbc2cc9..7effa1e689df 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -364,11 +364,11 @@ EXPORT_SYMBOL(sk_reset_txq);
364 364
365struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) 365struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
366{ 366{
367 struct dst_entry *dst = sk->sk_dst_cache; 367 struct dst_entry *dst = __sk_dst_get(sk);
368 368
369 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { 369 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
370 sk_tx_queue_clear(sk); 370 sk_tx_queue_clear(sk);
371 sk->sk_dst_cache = NULL; 371 rcu_assign_pointer(sk->sk_dst_cache, NULL);
372 dst_release(dst); 372 dst_release(dst);
373 return NULL; 373 return NULL;
374 } 374 }
@@ -1157,7 +1157,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
1157 skb_queue_head_init(&newsk->sk_async_wait_queue); 1157 skb_queue_head_init(&newsk->sk_async_wait_queue);
1158#endif 1158#endif
1159 1159
1160 rwlock_init(&newsk->sk_dst_lock); 1160 spin_lock_init(&newsk->sk_dst_lock);
1161 rwlock_init(&newsk->sk_callback_lock); 1161 rwlock_init(&newsk->sk_callback_lock);
1162 lockdep_set_class_and_name(&newsk->sk_callback_lock, 1162 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1163 af_callback_keys + newsk->sk_family, 1163 af_callback_keys + newsk->sk_family,
@@ -1898,7 +1898,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
1898 } else 1898 } else
1899 sk->sk_sleep = NULL; 1899 sk->sk_sleep = NULL;
1900 1900
1901 rwlock_init(&sk->sk_dst_lock); 1901 spin_lock_init(&sk->sk_dst_lock);
1902 rwlock_init(&sk->sk_callback_lock); 1902 rwlock_init(&sk->sk_callback_lock);
1903 lockdep_set_class_and_name(&sk->sk_callback_lock, 1903 lockdep_set_class_and_name(&sk->sk_callback_lock,
1904 af_callback_keys + sk->sk_family, 1904 af_callback_keys + sk->sk_family,
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 06124872af5b..dcc7d25996ab 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -11,11 +11,72 @@
11#include <linux/socket.h> 11#include <linux/socket.h>
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <linux/ratelimit.h> 13#include <linux/ratelimit.h>
14#include <linux/vmalloc.h>
14#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/slab.h>
15 17
16#include <net/ip.h> 18#include <net/ip.h>
17#include <net/sock.h> 19#include <net/sock.h>
18 20
21#ifdef CONFIG_RPS
22static int rps_sock_flow_sysctl(ctl_table *table, int write,
23 void __user *buffer, size_t *lenp, loff_t *ppos)
24{
25 unsigned int orig_size, size;
26 int ret, i;
27 ctl_table tmp = {
28 .data = &size,
29 .maxlen = sizeof(size),
30 .mode = table->mode
31 };
32 struct rps_sock_flow_table *orig_sock_table, *sock_table;
33 static DEFINE_MUTEX(sock_flow_mutex);
34
35 mutex_lock(&sock_flow_mutex);
36
37 orig_sock_table = rps_sock_flow_table;
38 size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
39
40 ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
41
42 if (write) {
43 if (size) {
44 if (size > 1<<30) {
45 /* Enforce limit to prevent overflow */
46 mutex_unlock(&sock_flow_mutex);
47 return -EINVAL;
48 }
49 size = roundup_pow_of_two(size);
50 if (size != orig_size) {
51 sock_table =
52 vmalloc(RPS_SOCK_FLOW_TABLE_SIZE(size));
53 if (!sock_table) {
54 mutex_unlock(&sock_flow_mutex);
55 return -ENOMEM;
56 }
57
58 sock_table->mask = size - 1;
59 } else
60 sock_table = orig_sock_table;
61
62 for (i = 0; i < size; i++)
63 sock_table->ents[i] = RPS_NO_CPU;
64 } else
65 sock_table = NULL;
66
67 if (sock_table != orig_sock_table) {
68 rcu_assign_pointer(rps_sock_flow_table, sock_table);
69 synchronize_rcu();
70 vfree(orig_sock_table);
71 }
72 }
73
74 mutex_unlock(&sock_flow_mutex);
75
76 return ret;
77}
78#endif /* CONFIG_RPS */
79
19static struct ctl_table net_core_table[] = { 80static struct ctl_table net_core_table[] = {
20#ifdef CONFIG_NET 81#ifdef CONFIG_NET
21 { 82 {
@@ -81,6 +142,14 @@ static struct ctl_table net_core_table[] = {
81 .mode = 0644, 142 .mode = 0644,
82 .proc_handler = proc_dointvec 143 .proc_handler = proc_dointvec
83 }, 144 },
145#ifdef CONFIG_RPS
146 {
147 .procname = "rps_sock_flow_entries",
148 .maxlen = sizeof(int),
149 .mode = 0644,
150 .proc_handler = rps_sock_flow_sysctl
151 },
152#endif
84#endif /* CONFIG_NET */ 153#endif /* CONFIG_NET */
85 { 154 {
86 .procname = "netdev_budget", 155 .procname = "netdev_budget",
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 813e399220a7..19ac2b985485 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -19,6 +19,7 @@
19 19
20#include <linux/netdevice.h> 20#include <linux/netdevice.h>
21#include <linux/netlink.h> 21#include <linux/netlink.h>
22#include <linux/slab.h>
22#include <net/netlink.h> 23#include <net/netlink.h>
23#include <net/rtnetlink.h> 24#include <net/rtnetlink.h>
24#include <linux/dcbnl.h> 25#include <linux/dcbnl.h>
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 49d27c556bec..36479ca61e03 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -11,6 +11,8 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13 13
14#include <linux/slab.h>
15
14#include "ccid.h" 16#include "ccid.h"
15#include "ccids/lib/tfrc.h" 17#include "ccids/lib/tfrc.h"
16 18
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index a47a8c918ee8..9b3ae9922be1 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -23,6 +23,7 @@
23/* 23/*
24 * This implementation should follow RFC 4341 24 * This implementation should follow RFC 4341
25 */ 25 */
26#include <linux/slab.h>
26#include "../feat.h" 27#include "../feat.h"
27#include "../ccid.h" 28#include "../ccid.h"
28#include "../dccp.h" 29#include "../dccp.h"
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index bcd7632299f5..d3235899c7e3 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -208,7 +208,7 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
208 goto restart_timer; 208 goto restart_timer;
209 } 209 }
210 210
211 ccid3_pr_debug("%s(%p, state=%s) - entry \n", dccp_role(sk), sk, 211 ccid3_pr_debug("%s(%p, state=%s) - entry\n", dccp_role(sk), sk,
212 ccid3_tx_state_name(hc->tx_state)); 212 ccid3_tx_state_name(hc->tx_state));
213 213
214 if (hc->tx_state == TFRC_SSTATE_FBACK) 214 if (hc->tx_state == TFRC_SSTATE_FBACK)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 5ef32c2f0d6a..a10a61a1ded2 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -189,7 +189,7 @@ enum {
189#define DCCP_MIB_MAX __DCCP_MIB_MAX 189#define DCCP_MIB_MAX __DCCP_MIB_MAX
190struct dccp_mib { 190struct dccp_mib {
191 unsigned long mibs[DCCP_MIB_MAX]; 191 unsigned long mibs[DCCP_MIB_MAX];
192} __SNMP_MIB_ALIGN__; 192};
193 193
194DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics); 194DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics);
195#define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field) 195#define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field)
@@ -223,7 +223,7 @@ static inline void dccp_csum_outgoing(struct sk_buff *skb)
223 skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0); 223 skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0);
224} 224}
225 225
226extern void dccp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); 226extern void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);
227 227
228extern int dccp_retransmit_skb(struct sock *sk); 228extern int dccp_retransmit_skb(struct sock *sk);
229 229
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 972b8dc918d6..df7dd26cf07e 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -22,6 +22,7 @@
22 * 2 of the License, or (at your option) any later version. 22 * 2 of the License, or (at your option) any later version.
23 */ 23 */
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/slab.h>
25#include "ccid.h" 26#include "ccid.h"
26#include "feat.h" 27#include "feat.h"
27 28
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 7648f316310f..58f7bc156850 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/dccp.h> 13#include <linux/dccp.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/slab.h>
15 16
16#include <net/sock.h> 17#include <net/sock.h>
17 18
@@ -414,7 +415,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
414 if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, 415 if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
415 dp->dccps_awl, dp->dccps_awh)) { 416 dp->dccps_awl, dp->dccps_awh)) {
416 dccp_pr_debug("invalid ackno: S.AWL=%llu, " 417 dccp_pr_debug("invalid ackno: S.AWL=%llu, "
417 "P.ackno=%llu, S.AWH=%llu \n", 418 "P.ackno=%llu, S.AWH=%llu\n",
418 (unsigned long long)dp->dccps_awl, 419 (unsigned long long)dp->dccps_awl,
419 (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq, 420 (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
420 (unsigned long long)dp->dccps_awh); 421 (unsigned long long)dp->dccps_awh);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 4071eaf2b361..d9b11ef8694c 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/dccp.h> 13#include <linux/dccp.h>
14#include <linux/icmp.h> 14#include <linux/icmp.h>
15#include <linux/slab.h>
15#include <linux/module.h> 16#include <linux/module.h>
16#include <linux/skbuff.h> 17#include <linux/skbuff.h>
17#include <linux/random.h> 18#include <linux/random.h>
@@ -348,7 +349,7 @@ static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb,
348 return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum); 349 return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum);
349} 350}
350 351
351void dccp_v4_send_check(struct sock *sk, int unused, struct sk_buff *skb) 352void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb)
352{ 353{
353 const struct inet_sock *inet = inet_sk(sk); 354 const struct inet_sock *inet = inet_sk(sk);
354 struct dccp_hdr *dh = dccp_hdr(skb); 355 struct dccp_hdr *dh = dccp_hdr(skb);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index af3394df63b7..091698899594 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -14,6 +14,7 @@
14 14
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/random.h> 16#include <linux/random.h>
17#include <linux/slab.h>
17#include <linux/xfrm.h> 18#include <linux/xfrm.h>
18 19
19#include <net/addrconf.h> 20#include <net/addrconf.h>
@@ -59,8 +60,7 @@ static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
59 return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum); 60 return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
60} 61}
61 62
62static inline void dccp_v6_send_check(struct sock *sk, int unused_value, 63static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
63 struct sk_buff *skb)
64{ 64{
65 struct ipv6_pinfo *np = inet6_sk(sk); 65 struct ipv6_pinfo *np = inet6_sk(sk);
66 struct dccp_hdr *dh = dccp_hdr(skb); 66 struct dccp_hdr *dh = dccp_hdr(skb);
@@ -292,7 +292,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
292 &ireq6->loc_addr, 292 &ireq6->loc_addr,
293 &ireq6->rmt_addr); 293 &ireq6->rmt_addr);
294 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); 294 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
295 err = ip6_xmit(sk, skb, &fl, opt, 0); 295 err = ip6_xmit(sk, skb, &fl, opt);
296 err = net_xmit_eval(err); 296 err = net_xmit_eval(err);
297 } 297 }
298 298
@@ -347,7 +347,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
347 if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) { 347 if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
348 if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) { 348 if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
349 skb_dst_set(skb, dst); 349 skb_dst_set(skb, dst);
350 ip6_xmit(ctl_sk, skb, &fl, NULL, 0); 350 ip6_xmit(ctl_sk, skb, &fl, NULL);
351 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); 351 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
352 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); 352 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
353 return; 353 return;
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 0d508c359fa9..128b089d3aef 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/dccp.h> 13#include <linux/dccp.h>
14#include <linux/gfp.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/skbuff.h> 16#include <linux/skbuff.h>
16#include <linux/timer.h> 17#include <linux/timer.h>
diff --git a/net/dccp/output.c b/net/dccp/output.c
index d6bb753bf6ad..e98b65e9569f 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -13,6 +13,7 @@
13#include <linux/dccp.h> 13#include <linux/dccp.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/skbuff.h> 15#include <linux/skbuff.h>
16#include <linux/slab.h>
16 17
17#include <net/inet_sock.h> 18#include <net/inet_sock.h>
18#include <net/sock.h> 19#include <net/sock.h>
@@ -128,14 +129,14 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
128 break; 129 break;
129 } 130 }
130 131
131 icsk->icsk_af_ops->send_check(sk, 0, skb); 132 icsk->icsk_af_ops->send_check(sk, skb);
132 133
133 if (set_ack) 134 if (set_ack)
134 dccp_event_ack_sent(sk); 135 dccp_event_ack_sent(sk);
135 136
136 DCCP_INC_STATS(DCCP_MIB_OUTSEGS); 137 DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
137 138
138 err = icsk->icsk_af_ops->queue_xmit(skb, 0); 139 err = icsk->icsk_af_ops->queue_xmit(skb);
139 return net_xmit_eval(err); 140 return net_xmit_eval(err);
140 } 141 }
141 return -ENOBUFS; 142 return -ENOBUFS;
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index f5b3464f1242..078e48d442fd 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -30,6 +30,7 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/kfifo.h> 31#include <linux/kfifo.h>
32#include <linux/vmalloc.h> 32#include <linux/vmalloc.h>
33#include <linux/gfp.h>
33#include <net/net_namespace.h> 34#include <net/net_namespace.h>
34 35
35#include "dccp.h" 36#include "dccp.h"
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index aa4cef374fd0..a0e38d8018f5 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -20,6 +20,7 @@
20#include <linux/if_arp.h> 20#include <linux/if_arp.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/random.h> 22#include <linux/random.h>
23#include <linux/slab.h>
23#include <net/checksum.h> 24#include <net/checksum.h>
24 25
25#include <net/inet_sock.h> 26#include <net/inet_sock.h>
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index bbfeb5eae46a..1a9aa05d4dc4 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -38,7 +38,7 @@ static int dccp_write_timeout(struct sock *sk)
38 38
39 if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { 39 if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
40 if (icsk->icsk_retransmits != 0) 40 if (icsk->icsk_retransmits != 0)
41 dst_negative_advice(&sk->sk_dst_cache, sk); 41 dst_negative_advice(sk);
42 retry_until = icsk->icsk_syn_retries ? 42 retry_until = icsk->icsk_syn_retries ?
43 : sysctl_dccp_request_retries; 43 : sysctl_dccp_request_retries;
44 } else { 44 } else {
@@ -63,7 +63,7 @@ static int dccp_write_timeout(struct sock *sk)
63 Golden words :-). 63 Golden words :-).
64 */ 64 */
65 65
66 dst_negative_advice(&sk->sk_dst_cache, sk); 66 dst_negative_advice(sk);
67 } 67 }
68 68
69 retry_until = sysctl_dccp_retries2; 69 retry_until = sysctl_dccp_retries2;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 2b494fac9468..55e3b6b0061a 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -446,7 +446,7 @@ static void dn_destruct(struct sock *sk)
446 skb_queue_purge(&scp->other_xmit_queue); 446 skb_queue_purge(&scp->other_xmit_queue);
447 skb_queue_purge(&scp->other_receive_queue); 447 skb_queue_purge(&scp->other_receive_queue);
448 448
449 dst_release(xchg(&sk->sk_dst_cache, NULL)); 449 dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
450} 450}
451 451
452static int dn_memory_pressure; 452static int dn_memory_pressure;
@@ -1105,7 +1105,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
1105 release_sock(sk); 1105 release_sock(sk);
1106 1106
1107 dst = skb_dst(skb); 1107 dst = skb_dst(skb);
1108 dst_release(xchg(&newsk->sk_dst_cache, dst)); 1108 sk_dst_set(newsk, dst);
1109 skb_dst_set(skb, NULL); 1109 skb_dst_set(skb, NULL);
1110 1110
1111 DN_SK(newsk)->state = DN_CR; 1111 DN_SK(newsk)->state = DN_CR;
@@ -1956,7 +1956,7 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
1956 } 1956 }
1957 1957
1958 if ((flags & MSG_TRYHARD) && sk->sk_dst_cache) 1958 if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
1959 dst_negative_advice(&sk->sk_dst_cache, sk); 1959 dst_negative_advice(sk);
1960 1960
1961 mss = scp->segsize_rem; 1961 mss = scp->segsize_rem;
1962 fctype = scp->services_rem & NSP_FC_MASK; 1962 fctype = scp->services_rem & NSP_FC_MASK;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 238af093495b..615dbe3b43f9 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -40,6 +40,7 @@
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/sysctl.h> 41#include <linux/sysctl.h>
42#include <linux/notifier.h> 42#include <linux/notifier.h>
43#include <linux/slab.h>
43#include <asm/uaccess.h> 44#include <asm/uaccess.h>
44#include <asm/system.h> 45#include <asm/system.h>
45#include <net/net_namespace.h> 46#include <net/net_namespace.h>
@@ -349,7 +350,7 @@ static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int de
349 if (dn_db->dev->type == ARPHRD_ETHER) { 350 if (dn_db->dev->type == ARPHRD_ETHER) {
350 if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) { 351 if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) {
351 dn_dn2eth(mac_addr, ifa1->ifa_local); 352 dn_dn2eth(mac_addr, ifa1->ifa_local);
352 dev_mc_delete(dev, mac_addr, ETH_ALEN, 0); 353 dev_mc_del(dev, mac_addr);
353 } 354 }
354 } 355 }
355 356
@@ -380,7 +381,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
380 if (dev->type == ARPHRD_ETHER) { 381 if (dev->type == ARPHRD_ETHER) {
381 if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) { 382 if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) {
382 dn_dn2eth(mac_addr, ifa->ifa_local); 383 dn_dn2eth(mac_addr, ifa->ifa_local);
383 dev_mc_add(dev, mac_addr, ETH_ALEN, 0); 384 dev_mc_add(dev, mac_addr);
384 } 385 }
385 } 386 }
386 387
@@ -1000,9 +1001,9 @@ static int dn_eth_up(struct net_device *dev)
1000 struct dn_dev *dn_db = dev->dn_ptr; 1001 struct dn_dev *dn_db = dev->dn_ptr;
1001 1002
1002 if (dn_db->parms.forwarding == 0) 1003 if (dn_db->parms.forwarding == 0)
1003 dev_mc_add(dev, dn_rt_all_end_mcast, ETH_ALEN, 0); 1004 dev_mc_add(dev, dn_rt_all_end_mcast);
1004 else 1005 else
1005 dev_mc_add(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0); 1006 dev_mc_add(dev, dn_rt_all_rt_mcast);
1006 1007
1007 dn_db->use_long = 1; 1008 dn_db->use_long = 1;
1008 1009
@@ -1014,9 +1015,9 @@ static void dn_eth_down(struct net_device *dev)
1014 struct dn_dev *dn_db = dev->dn_ptr; 1015 struct dn_dev *dn_db = dev->dn_ptr;
1015 1016
1016 if (dn_db->parms.forwarding == 0) 1017 if (dn_db->parms.forwarding == 0)
1017 dev_mc_delete(dev, dn_rt_all_end_mcast, ETH_ALEN, 0); 1018 dev_mc_del(dev, dn_rt_all_end_mcast);
1018 else 1019 else
1019 dev_mc_delete(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0); 1020 dev_mc_del(dev, dn_rt_all_rt_mcast);
1020} 1021}
1021 1022
1022static void dn_dev_set_timer(struct net_device *dev); 1023static void dn_dev_set_timer(struct net_device *dev);
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index e9d48700e83a..4ab96c15166d 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -20,6 +20,7 @@
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/net.h> 21#include <linux/net.h>
22#include <linux/socket.h> 22#include <linux/socket.h>
23#include <linux/slab.h>
23#include <linux/sockios.h> 24#include <linux/sockios.h>
24#include <linux/init.h> 25#include <linux/init.h>
25#include <linux/skbuff.h> 26#include <linux/skbuff.h>
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 76622c0442be..0363bb95cc7d 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -28,6 +28,7 @@
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/socket.h> 29#include <linux/socket.h>
30#include <linux/if_arp.h> 30#include <linux/if_arp.h>
31#include <linux/slab.h>
31#include <linux/if_ether.h> 32#include <linux/if_ether.h>
32#include <linux/init.h> 33#include <linux/init.h>
33#include <linux/proc_fs.h> 34#include <linux/proc_fs.h>
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index 65531ad96e70..b430549e2b91 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -57,6 +57,7 @@
57#include <linux/netdevice.h> 57#include <linux/netdevice.h>
58#include <linux/inet.h> 58#include <linux/inet.h>
59#include <linux/route.h> 59#include <linux/route.h>
60#include <linux/slab.h>
60#include <net/sock.h> 61#include <net/sock.h>
61#include <net/tcp_states.h> 62#include <net/tcp_states.h>
62#include <asm/system.h> 63#include <asm/system.h>
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index a65e929ce76c..baeb1eaf011b 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -50,6 +50,7 @@
50#include <linux/netdevice.h> 50#include <linux/netdevice.h>
51#include <linux/inet.h> 51#include <linux/inet.h>
52#include <linux/route.h> 52#include <linux/route.h>
53#include <linux/slab.h>
53#include <net/sock.h> 54#include <net/sock.h>
54#include <asm/system.h> 55#include <asm/system.h>
55#include <linux/fcntl.h> 56#include <linux/fcntl.h>
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 86eca5f7f678..a8432e399545 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -66,6 +66,7 @@
66#include <linux/inet.h> 66#include <linux/inet.h>
67#include <linux/route.h> 67#include <linux/route.h>
68#include <linux/in_route.h> 68#include <linux/in_route.h>
69#include <linux/slab.h>
69#include <net/sock.h> 70#include <net/sock.h>
70#include <linux/mm.h> 71#include <linux/mm.h>
71#include <linux/proc_fs.h> 72#include <linux/proc_fs.h>
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 7466c546f286..af28dcc21844 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -196,7 +196,6 @@ static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
196{ 196{
197 struct dn_fib_rule *r = (struct dn_fib_rule *)rule; 197 struct dn_fib_rule *r = (struct dn_fib_rule *)rule;
198 198
199 frh->family = AF_DECnet;
200 frh->dst_len = r->dst_len; 199 frh->dst_len = r->dst_len;
201 frh->src_len = r->src_len; 200 frh->src_len = r->src_len;
202 frh->tos = 0; 201 frh->tos = 0;
@@ -212,30 +211,13 @@ nla_put_failure:
212 return -ENOBUFS; 211 return -ENOBUFS;
213} 212}
214 213
215static u32 dn_fib_rule_default_pref(struct fib_rules_ops *ops)
216{
217 struct list_head *pos;
218 struct fib_rule *rule;
219
220 if (!list_empty(&dn_fib_rules_ops->rules_list)) {
221 pos = dn_fib_rules_ops->rules_list.next;
222 if (pos->next != &dn_fib_rules_ops->rules_list) {
223 rule = list_entry(pos->next, struct fib_rule, list);
224 if (rule->pref)
225 return rule->pref - 1;
226 }
227 }
228
229 return 0;
230}
231
232static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops) 214static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops)
233{ 215{
234 dn_rt_cache_flush(-1); 216 dn_rt_cache_flush(-1);
235} 217}
236 218
237static struct fib_rules_ops dn_fib_rules_ops_template = { 219static struct fib_rules_ops dn_fib_rules_ops_template = {
238 .family = AF_DECnet, 220 .family = FIB_RULES_DECNET,
239 .rule_size = sizeof(struct dn_fib_rule), 221 .rule_size = sizeof(struct dn_fib_rule),
240 .addr_size = sizeof(u16), 222 .addr_size = sizeof(u16),
241 .action = dn_fib_rule_action, 223 .action = dn_fib_rule_action,
@@ -243,7 +225,7 @@ static struct fib_rules_ops dn_fib_rules_ops_template = {
243 .configure = dn_fib_rule_configure, 225 .configure = dn_fib_rule_configure,
244 .compare = dn_fib_rule_compare, 226 .compare = dn_fib_rule_compare,
245 .fill = dn_fib_rule_fill, 227 .fill = dn_fib_rule_fill,
246 .default_pref = dn_fib_rule_default_pref, 228 .default_pref = fib_default_rule_pref,
247 .flush_cache = dn_fib_rule_flush_cache, 229 .flush_cache = dn_fib_rule_flush_cache,
248 .nlgroup = RTNLGRP_DECnet_RULE, 230 .nlgroup = RTNLGRP_DECnet_RULE,
249 .policy = dn_fib_rule_policy, 231 .policy = dn_fib_rule_policy,
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index b9a33bb5e9cc..f2abd3755690 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -15,6 +15,7 @@
15#include <linux/string.h> 15#include <linux/string.h>
16#include <linux/net.h> 16#include <linux/net.h>
17#include <linux/socket.h> 17#include <linux/socket.h>
18#include <linux/slab.h>
18#include <linux/sockios.h> 19#include <linux/sockios.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <linux/skbuff.h> 21#include <linux/skbuff.h>
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 6d2bd3202048..64a7f39e069f 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -14,6 +14,7 @@
14 */ 14 */
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/skbuff.h> 16#include <linux/skbuff.h>
17#include <linux/slab.h>
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/netdevice.h> 19#include <linux/netdevice.h>
19#include <linux/netfilter.h> 20#include <linux/netfilter.h>
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 71489f69a42c..6112a12578b2 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -11,6 +11,7 @@
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/slab.h>
14#include <net/dsa.h> 15#include <net/dsa.h>
15#include "dsa_priv.h" 16#include "dsa_priv.h"
16 17
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 2175e6d5cc8d..8fdca56bb08f 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -67,7 +67,7 @@ static int dsa_slave_open(struct net_device *dev)
67 return -ENETDOWN; 67 return -ENETDOWN;
68 68
69 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) { 69 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) {
70 err = dev_unicast_add(master, dev->dev_addr); 70 err = dev_uc_add(master, dev->dev_addr);
71 if (err < 0) 71 if (err < 0)
72 goto out; 72 goto out;
73 } 73 }
@@ -90,7 +90,7 @@ clear_allmulti:
90 dev_set_allmulti(master, -1); 90 dev_set_allmulti(master, -1);
91del_unicast: 91del_unicast:
92 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) 92 if (compare_ether_addr(dev->dev_addr, master->dev_addr))
93 dev_unicast_delete(master, dev->dev_addr); 93 dev_uc_del(master, dev->dev_addr);
94out: 94out:
95 return err; 95 return err;
96} 96}
@@ -101,14 +101,14 @@ static int dsa_slave_close(struct net_device *dev)
101 struct net_device *master = p->parent->dst->master_netdev; 101 struct net_device *master = p->parent->dst->master_netdev;
102 102
103 dev_mc_unsync(master, dev); 103 dev_mc_unsync(master, dev);
104 dev_unicast_unsync(master, dev); 104 dev_uc_unsync(master, dev);
105 if (dev->flags & IFF_ALLMULTI) 105 if (dev->flags & IFF_ALLMULTI)
106 dev_set_allmulti(master, -1); 106 dev_set_allmulti(master, -1);
107 if (dev->flags & IFF_PROMISC) 107 if (dev->flags & IFF_PROMISC)
108 dev_set_promiscuity(master, -1); 108 dev_set_promiscuity(master, -1);
109 109
110 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) 110 if (compare_ether_addr(dev->dev_addr, master->dev_addr))
111 dev_unicast_delete(master, dev->dev_addr); 111 dev_uc_del(master, dev->dev_addr);
112 112
113 return 0; 113 return 0;
114} 114}
@@ -130,7 +130,7 @@ static void dsa_slave_set_rx_mode(struct net_device *dev)
130 struct net_device *master = p->parent->dst->master_netdev; 130 struct net_device *master = p->parent->dst->master_netdev;
131 131
132 dev_mc_sync(master, dev); 132 dev_mc_sync(master, dev);
133 dev_unicast_sync(master, dev); 133 dev_uc_sync(master, dev);
134} 134}
135 135
136static int dsa_slave_set_mac_address(struct net_device *dev, void *a) 136static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
@@ -147,13 +147,13 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
147 goto out; 147 goto out;
148 148
149 if (compare_ether_addr(addr->sa_data, master->dev_addr)) { 149 if (compare_ether_addr(addr->sa_data, master->dev_addr)) {
150 err = dev_unicast_add(master, addr->sa_data); 150 err = dev_uc_add(master, addr->sa_data);
151 if (err < 0) 151 if (err < 0)
152 return err; 152 return err;
153 } 153 }
154 154
155 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) 155 if (compare_ether_addr(dev->dev_addr, master->dev_addr))
156 dev_unicast_delete(master, dev->dev_addr); 156 dev_uc_del(master, dev->dev_addr);
157 157
158out: 158out:
159 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 159 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index cdf2d28a0297..98dfe80b4538 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -11,6 +11,7 @@
11#include <linux/etherdevice.h> 11#include <linux/etherdevice.h>
12#include <linux/list.h> 12#include <linux/list.h>
13#include <linux/netdevice.h> 13#include <linux/netdevice.h>
14#include <linux/slab.h>
14#include "dsa_priv.h" 15#include "dsa_priv.h"
15 16
16#define DSA_HLEN 4 17#define DSA_HLEN 4
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 8f53948cff4f..6f383322ad25 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -11,6 +11,7 @@
11#include <linux/etherdevice.h> 11#include <linux/etherdevice.h>
12#include <linux/list.h> 12#include <linux/list.h>
13#include <linux/netdevice.h> 13#include <linux/netdevice.h>
14#include <linux/slab.h>
14#include "dsa_priv.h" 15#include "dsa_priv.h"
15 16
16#define DSA_HLEN 4 17#define DSA_HLEN 4
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index a85c829853c0..d6d7d0add3cb 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -11,6 +11,7 @@
11#include <linux/etherdevice.h> 11#include <linux/etherdevice.h>
12#include <linux/list.h> 12#include <linux/list.h>
13#include <linux/netdevice.h> 13#include <linux/netdevice.h>
14#include <linux/slab.h>
14#include "dsa_priv.h" 15#include "dsa_priv.h"
15 16
16netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev) 17netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 29b4931aae52..2a5a8053e000 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -30,6 +30,7 @@
30#include <linux/wireless.h> 30#include <linux/wireless.h>
31#include <linux/skbuff.h> 31#include <linux/skbuff.h>
32#include <linux/udp.h> 32#include <linux/udp.h>
33#include <linux/slab.h>
33#include <net/sock.h> 34#include <net/sock.h>
34#include <net/inet_common.h> 35#include <net/inet_common.h>
35#include <linux/stat.h> 36#include <linux/stat.h>
diff --git a/net/ethernet/pe2.c b/net/ethernet/pe2.c
index d60e15d9365e..eb00796758c3 100644
--- a/net/ethernet/pe2.c
+++ b/net/ethernet/pe2.c
@@ -3,6 +3,7 @@
3#include <linux/module.h> 3#include <linux/module.h>
4#include <linux/netdevice.h> 4#include <linux/netdevice.h>
5#include <linux/skbuff.h> 5#include <linux/skbuff.h>
6#include <linux/slab.h>
6 7
7#include <net/datalink.h> 8#include <net/datalink.h>
8 9
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index bad1c49fd960..c7da600750bb 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -28,6 +28,7 @@
28#include <linux/if.h> 28#include <linux/if.h>
29#include <linux/termios.h> /* For TIOCOUTQ/INQ */ 29#include <linux/termios.h> /* For TIOCOUTQ/INQ */
30#include <linux/list.h> 30#include <linux/list.h>
31#include <linux/slab.h>
31#include <net/datalink.h> 32#include <net/datalink.h>
32#include <net/psnap.h> 33#include <net/psnap.h>
33#include <net/sock.h> 34#include <net/sock.h>
@@ -126,6 +127,9 @@ static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr,
126{ 127{
127 struct sock *sk = sock->sk; 128 struct sock *sk = sock->sk;
128 129
130 if (addr_len < sizeof(uaddr->sa_family))
131 return -EINVAL;
132
129 if (uaddr->sa_family == AF_UNSPEC) 133 if (uaddr->sa_family == AF_UNSPEC)
130 return sk->sk_prot->disconnect(sk, flags); 134 return sk->sk_prot->disconnect(sk, flags);
131 135
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 9aac5aee1575..1a3334c2609a 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -25,6 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/if_arp.h> 26#include <linux/if_arp.h>
27#include <linux/list.h> 27#include <linux/list.h>
28#include <linux/slab.h>
28#include <net/sock.h> 29#include <net/sock.h>
29#include <net/af_ieee802154.h> 30#include <net/af_ieee802154.h>
30#include <net/ieee802154.h> 31#include <net/ieee802154.h>
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index 33137b99e471..c8097ae2482f 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/gfp.h>
26#include <net/genetlink.h> 27#include <net/genetlink.h>
27#include <linux/nl802154.h> 28#include <linux/nl802154.h>
28 29
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index 135c1678fb11..71ee1108d4f8 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -22,6 +22,7 @@
22 * Maxim Osipov <maxim.osipov@siemens.com> 22 * Maxim Osipov <maxim.osipov@siemens.com>
23 */ 23 */
24 24
25#include <linux/gfp.h>
25#include <linux/kernel.h> 26#include <linux/kernel.h>
26#include <linux/if_arp.h> 27#include <linux/if_arp.h>
27#include <linux/netdevice.h> 28#include <linux/netdevice.h>
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index 199a2d9d12f9..ed0eab39f531 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/slab.h>
26#include <net/netlink.h> 27#include <net/netlink.h>
27#include <net/genetlink.h> 28#include <net/genetlink.h>
28#include <net/wpan-phy.h> 29#include <net/wpan-phy.h>
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index 9c9b85c00033..10970ca85748 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -25,6 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/if_arp.h> 26#include <linux/if_arp.h>
27#include <linux/list.h> 27#include <linux/list.h>
28#include <linux/slab.h>
28#include <net/sock.h> 29#include <net/sock.h>
29#include <net/af_ieee802154.h> 30#include <net/af_ieee802154.h>
30 31
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c
index 268691256a6d..3d803a1b9fb6 100644
--- a/net/ieee802154/wpan-class.c
+++ b/net/ieee802154/wpan-class.c
@@ -16,6 +16,7 @@
16 * 16 *
17 */ 17 */
18 18
19#include <linux/slab.h>
19#include <linux/kernel.h> 20#include <linux/kernel.h>
20#include <linux/module.h> 21#include <linux/module.h>
21#include <linux/device.h> 22#include <linux/device.h>
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index c9a1c68767ff..8e3a1fd938ab 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -250,6 +250,20 @@ config IP_MROUTE
250 <file:Documentation/networking/multicast.txt>. If you haven't heard 250 <file:Documentation/networking/multicast.txt>. If you haven't heard
251 about it, you don't need it. 251 about it, you don't need it.
252 252
253config IP_MROUTE_MULTIPLE_TABLES
254 bool "IP: multicast policy routing"
255 depends on IP_MROUTE && IP_ADVANCED_ROUTER
256 select FIB_RULES
257 help
258 Normally, a multicast router runs a userspace daemon and decides
259 what to do with a multicast packet based on the source and
260 destination addresses. If you say Y here, the multicast router
261 will also be able to take interfaces and packet marks into
262 account and run multiple instances of userspace daemons
263 simultaneously, each one handling a single table.
264
265 If unsure, say N.
266
253config IP_PIMSM_V1 267config IP_PIMSM_V1
254 bool "IP: PIM-SM version 1 support" 268 bool "IP: PIM-SM version 1 support"
255 depends on IP_MROUTE 269 depends on IP_MROUTE
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 33b7dffa7732..c5376c725503 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -86,6 +86,7 @@
86#include <linux/poll.h> 86#include <linux/poll.h>
87#include <linux/netfilter_ipv4.h> 87#include <linux/netfilter_ipv4.h>
88#include <linux/random.h> 88#include <linux/random.h>
89#include <linux/slab.h>
89 90
90#include <asm/uaccess.h> 91#include <asm/uaccess.h>
91#include <asm/system.h> 92#include <asm/system.h>
@@ -153,7 +154,7 @@ void inet_sock_destruct(struct sock *sk)
153 WARN_ON(sk->sk_forward_alloc); 154 WARN_ON(sk->sk_forward_alloc);
154 155
155 kfree(inet->opt); 156 kfree(inet->opt);
156 dst_release(sk->sk_dst_cache); 157 dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
157 sk_refcnt_debug_dec(sk); 158 sk_refcnt_debug_dec(sk);
158} 159}
159EXPORT_SYMBOL(inet_sock_destruct); 160EXPORT_SYMBOL(inet_sock_destruct);
@@ -418,6 +419,8 @@ int inet_release(struct socket *sock)
418 if (sk) { 419 if (sk) {
419 long timeout; 420 long timeout;
420 421
422 inet_rps_reset_flow(sk);
423
421 /* Applications forget to leave groups before exiting */ 424 /* Applications forget to leave groups before exiting */
422 ip_mc_drop_socket(sk); 425 ip_mc_drop_socket(sk);
423 426
@@ -530,6 +533,8 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr,
530{ 533{
531 struct sock *sk = sock->sk; 534 struct sock *sk = sock->sk;
532 535
536 if (addr_len < sizeof(uaddr->sa_family))
537 return -EINVAL;
533 if (uaddr->sa_family == AF_UNSPEC) 538 if (uaddr->sa_family == AF_UNSPEC)
534 return sk->sk_prot->disconnect(sk, flags); 539 return sk->sk_prot->disconnect(sk, flags);
535 540
@@ -573,6 +578,9 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
573 int err; 578 int err;
574 long timeo; 579 long timeo;
575 580
581 if (addr_len < sizeof(uaddr->sa_family))
582 return -EINVAL;
583
576 lock_sock(sk); 584 lock_sock(sk);
577 585
578 if (uaddr->sa_family == AF_UNSPEC) { 586 if (uaddr->sa_family == AF_UNSPEC) {
@@ -714,6 +722,8 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
714{ 722{
715 struct sock *sk = sock->sk; 723 struct sock *sk = sock->sk;
716 724
725 inet_rps_record_flow(sk);
726
717 /* We may need to bind the socket. */ 727 /* We may need to bind the socket. */
718 if (!inet_sk(sk)->inet_num && inet_autobind(sk)) 728 if (!inet_sk(sk)->inet_num && inet_autobind(sk))
719 return -EAGAIN; 729 return -EAGAIN;
@@ -722,12 +732,13 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
722} 732}
723EXPORT_SYMBOL(inet_sendmsg); 733EXPORT_SYMBOL(inet_sendmsg);
724 734
725
726static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, 735static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
727 size_t size, int flags) 736 size_t size, int flags)
728{ 737{
729 struct sock *sk = sock->sk; 738 struct sock *sk = sock->sk;
730 739
740 inet_rps_record_flow(sk);
741
731 /* We may need to bind the socket. */ 742 /* We may need to bind the socket. */
732 if (!inet_sk(sk)->inet_num && inet_autobind(sk)) 743 if (!inet_sk(sk)->inet_num && inet_autobind(sk))
733 return -EAGAIN; 744 return -EAGAIN;
@@ -737,6 +748,22 @@ static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
737 return sock_no_sendpage(sock, page, offset, size, flags); 748 return sock_no_sendpage(sock, page, offset, size, flags);
738} 749}
739 750
751int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
752 size_t size, int flags)
753{
754 struct sock *sk = sock->sk;
755 int addr_len = 0;
756 int err;
757
758 inet_rps_record_flow(sk);
759
760 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
761 flags & ~MSG_DONTWAIT, &addr_len);
762 if (err >= 0)
763 msg->msg_namelen = addr_len;
764 return err;
765}
766EXPORT_SYMBOL(inet_recvmsg);
740 767
741int inet_shutdown(struct socket *sock, int how) 768int inet_shutdown(struct socket *sock, int how)
742{ 769{
@@ -866,7 +893,7 @@ const struct proto_ops inet_stream_ops = {
866 .setsockopt = sock_common_setsockopt, 893 .setsockopt = sock_common_setsockopt,
867 .getsockopt = sock_common_getsockopt, 894 .getsockopt = sock_common_getsockopt,
868 .sendmsg = tcp_sendmsg, 895 .sendmsg = tcp_sendmsg,
869 .recvmsg = sock_common_recvmsg, 896 .recvmsg = inet_recvmsg,
870 .mmap = sock_no_mmap, 897 .mmap = sock_no_mmap,
871 .sendpage = tcp_sendpage, 898 .sendpage = tcp_sendpage,
872 .splice_read = tcp_splice_read, 899 .splice_read = tcp_splice_read,
@@ -893,7 +920,7 @@ const struct proto_ops inet_dgram_ops = {
893 .setsockopt = sock_common_setsockopt, 920 .setsockopt = sock_common_setsockopt,
894 .getsockopt = sock_common_getsockopt, 921 .getsockopt = sock_common_getsockopt,
895 .sendmsg = inet_sendmsg, 922 .sendmsg = inet_sendmsg,
896 .recvmsg = sock_common_recvmsg, 923 .recvmsg = inet_recvmsg,
897 .mmap = sock_no_mmap, 924 .mmap = sock_no_mmap,
898 .sendpage = inet_sendpage, 925 .sendpage = inet_sendpage,
899#ifdef CONFIG_COMPAT 926#ifdef CONFIG_COMPAT
@@ -923,7 +950,7 @@ static const struct proto_ops inet_sockraw_ops = {
923 .setsockopt = sock_common_setsockopt, 950 .setsockopt = sock_common_setsockopt,
924 .getsockopt = sock_common_getsockopt, 951 .getsockopt = sock_common_getsockopt,
925 .sendmsg = inet_sendmsg, 952 .sendmsg = inet_sendmsg,
926 .recvmsg = sock_common_recvmsg, 953 .recvmsg = inet_recvmsg,
927 .mmap = sock_no_mmap, 954 .mmap = sock_no_mmap,
928 .sendpage = inet_sendpage, 955 .sendpage = inet_sendpage,
929#ifdef CONFIG_COMPAT 956#ifdef CONFIG_COMPAT
@@ -1401,10 +1428,10 @@ EXPORT_SYMBOL_GPL(snmp_fold_field);
1401int snmp_mib_init(void __percpu *ptr[2], size_t mibsize) 1428int snmp_mib_init(void __percpu *ptr[2], size_t mibsize)
1402{ 1429{
1403 BUG_ON(ptr == NULL); 1430 BUG_ON(ptr == NULL);
1404 ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long)); 1431 ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long));
1405 if (!ptr[0]) 1432 if (!ptr[0])
1406 goto err0; 1433 goto err0;
1407 ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long long)); 1434 ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long));
1408 if (!ptr[1]) 1435 if (!ptr[1])
1409 goto err1; 1436 goto err1;
1410 return 0; 1437 return 0;
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 987b47dc69ad..880a5ec6dce0 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -1,6 +1,7 @@
1#include <crypto/hash.h> 1#include <crypto/hash.h>
2#include <linux/err.h> 2#include <linux/err.h>
3#include <linux/module.h> 3#include <linux/module.h>
4#include <linux/slab.h>
4#include <net/ip.h> 5#include <net/ip.h>
5#include <net/xfrm.h> 6#include <net/xfrm.h>
6#include <net/ah.h> 7#include <net/ah.h>
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index c4dd13542802..6e747065c202 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -98,6 +98,7 @@
98#include <linux/net.h> 98#include <linux/net.h>
99#include <linux/rcupdate.h> 99#include <linux/rcupdate.h>
100#include <linux/jhash.h> 100#include <linux/jhash.h>
101#include <linux/slab.h>
101#ifdef CONFIG_SYSCTL 102#ifdef CONFIG_SYSCTL
102#include <linux/sysctl.h> 103#include <linux/sysctl.h>
103#endif 104#endif
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 1e029dc75455..c97cd9ff697e 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -44,6 +44,7 @@
44#include <linux/string.h> 44#include <linux/string.h>
45#include <linux/jhash.h> 45#include <linux/jhash.h>
46#include <linux/audit.h> 46#include <linux/audit.h>
47#include <linux/slab.h>
47#include <net/ip.h> 48#include <net/ip.h>
48#include <net/icmp.h> 49#include <net/icmp.h>
49#include <net/tcp.h> 50#include <net/tcp.h>
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 51ca946e3392..382bc768ed56 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -50,6 +50,7 @@
50#include <linux/notifier.h> 50#include <linux/notifier.h>
51#include <linux/inetdevice.h> 51#include <linux/inetdevice.h>
52#include <linux/igmp.h> 52#include <linux/igmp.h>
53#include <linux/slab.h>
53#ifdef CONFIG_SYSCTL 54#ifdef CONFIG_SYSCTL
54#include <linux/sysctl.h> 55#include <linux/sysctl.h>
55#endif 56#endif
@@ -1095,10 +1096,10 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1095 case NETDEV_DOWN: 1096 case NETDEV_DOWN:
1096 ip_mc_down(in_dev); 1097 ip_mc_down(in_dev);
1097 break; 1098 break;
1098 case NETDEV_BONDING_OLDTYPE: 1099 case NETDEV_PRE_TYPE_CHANGE:
1099 ip_mc_unmap(in_dev); 1100 ip_mc_unmap(in_dev);
1100 break; 1101 break;
1101 case NETDEV_BONDING_NEWTYPE: 1102 case NETDEV_POST_TYPE_CHANGE:
1102 ip_mc_remap(in_dev); 1103 ip_mc_remap(in_dev);
1103 break; 1104 break;
1104 case NETDEV_CHANGEMTU: 1105 case NETDEV_CHANGEMTU:
@@ -1194,7 +1195,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1194 hlist_for_each_entry_rcu(dev, node, head, index_hlist) { 1195 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
1195 if (idx < s_idx) 1196 if (idx < s_idx)
1196 goto cont; 1197 goto cont;
1197 if (idx > s_idx) 1198 if (h > s_h || idx > s_idx)
1198 s_ip_idx = 0; 1199 s_ip_idx = 0;
1199 in_dev = __in_dev_get_rcu(dev); 1200 in_dev = __in_dev_get_rcu(dev);
1200 if (!in_dev) 1201 if (!in_dev)
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 9b3e28ed5240..4f0ed458c883 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -34,6 +34,7 @@
34#include <linux/skbuff.h> 34#include <linux/skbuff.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/list.h> 36#include <linux/list.h>
37#include <linux/slab.h>
37 38
38#include <net/ip.h> 39#include <net/ip.h>
39#include <net/protocol.h> 40#include <net/protocol.h>
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 14972017b9c2..4ed7e0dea1bc 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -32,6 +32,7 @@
32#include <linux/skbuff.h> 32#include <linux/skbuff.h>
33#include <linux/netlink.h> 33#include <linux/netlink.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/slab.h>
35 36
36#include <net/net_namespace.h> 37#include <net/net_namespace.h>
37#include <net/ip.h> 38#include <net/ip.h>
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index ca2d07b1c706..3ec84fea5b71 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -213,7 +213,6 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
213{ 213{
214 struct fib4_rule *rule4 = (struct fib4_rule *) rule; 214 struct fib4_rule *rule4 = (struct fib4_rule *) rule;
215 215
216 frh->family = AF_INET;
217 frh->dst_len = rule4->dst_len; 216 frh->dst_len = rule4->dst_len;
218 frh->src_len = rule4->src_len; 217 frh->src_len = rule4->src_len;
219 frh->tos = rule4->tos; 218 frh->tos = rule4->tos;
@@ -234,23 +233,6 @@ nla_put_failure:
234 return -ENOBUFS; 233 return -ENOBUFS;
235} 234}
236 235
237static u32 fib4_rule_default_pref(struct fib_rules_ops *ops)
238{
239 struct list_head *pos;
240 struct fib_rule *rule;
241
242 if (!list_empty(&ops->rules_list)) {
243 pos = ops->rules_list.next;
244 if (pos->next != &ops->rules_list) {
245 rule = list_entry(pos->next, struct fib_rule, list);
246 if (rule->pref)
247 return rule->pref - 1;
248 }
249 }
250
251 return 0;
252}
253
254static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule) 236static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule)
255{ 237{
256 return nla_total_size(4) /* dst */ 238 return nla_total_size(4) /* dst */
@@ -264,7 +246,7 @@ static void fib4_rule_flush_cache(struct fib_rules_ops *ops)
264} 246}
265 247
266static struct fib_rules_ops fib4_rules_ops_template = { 248static struct fib_rules_ops fib4_rules_ops_template = {
267 .family = AF_INET, 249 .family = FIB_RULES_IPV4,
268 .rule_size = sizeof(struct fib4_rule), 250 .rule_size = sizeof(struct fib4_rule),
269 .addr_size = sizeof(u32), 251 .addr_size = sizeof(u32),
270 .action = fib4_rule_action, 252 .action = fib4_rule_action,
@@ -272,7 +254,7 @@ static struct fib_rules_ops fib4_rules_ops_template = {
272 .configure = fib4_rule_configure, 254 .configure = fib4_rule_configure,
273 .compare = fib4_rule_compare, 255 .compare = fib4_rule_compare,
274 .fill = fib4_rule_fill, 256 .fill = fib4_rule_fill,
275 .default_pref = fib4_rule_default_pref, 257 .default_pref = fib_default_rule_pref,
276 .nlmsg_payload = fib4_rule_nlmsg_payload, 258 .nlmsg_payload = fib4_rule_nlmsg_payload,
277 .flush_cache = fib4_rule_flush_cache, 259 .flush_cache = fib4_rule_flush_cache,
278 .nlgroup = RTNLGRP_IPV4_RULE, 260 .nlgroup = RTNLGRP_IPV4_RULE,
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 1af0ea0fb6a2..20f09c5b31e8 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -32,6 +32,7 @@
32#include <linux/proc_fs.h> 32#include <linux/proc_fs.h>
33#include <linux/skbuff.h> 33#include <linux/skbuff.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/slab.h>
35 36
36#include <net/arp.h> 37#include <net/arp.h>
37#include <net/ip.h> 38#include <net/ip.h>
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index af5d89792860..59a838795e3e 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -71,6 +71,7 @@
71#include <linux/netlink.h> 71#include <linux/netlink.h>
72#include <linux/init.h> 72#include <linux/init.h>
73#include <linux/list.h> 73#include <linux/list.h>
74#include <linux/slab.h>
74#include <net/net_namespace.h> 75#include <net/net_namespace.h>
75#include <net/ip.h> 76#include <net/ip.h>
76#include <net/protocol.h> 77#include <net/protocol.h>
@@ -961,7 +962,9 @@ fib_find_node(struct trie *t, u32 key)
961 struct node *n; 962 struct node *n;
962 963
963 pos = 0; 964 pos = 0;
964 n = rcu_dereference(t->trie); 965 n = rcu_dereference_check(t->trie,
966 rcu_read_lock_held() ||
967 lockdep_rtnl_is_held());
965 968
966 while (n != NULL && NODE_TYPE(n) == T_TNODE) { 969 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
967 tn = (struct tnode *) n; 970 tn = (struct tnode *) n;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 4b4c2bcd15db..f3d339f728b0 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -74,6 +74,7 @@
74#include <linux/netdevice.h> 74#include <linux/netdevice.h>
75#include <linux/string.h> 75#include <linux/string.h>
76#include <linux/netfilter_ipv4.h> 76#include <linux/netfilter_ipv4.h>
77#include <linux/slab.h>
77#include <net/snmp.h> 78#include <net/snmp.h>
78#include <net/ip.h> 79#include <net/ip.h>
79#include <net/route.h> 80#include <net/route.h>
@@ -330,9 +331,10 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
330 if (ip_append_data(sk, icmp_glue_bits, icmp_param, 331 if (ip_append_data(sk, icmp_glue_bits, icmp_param,
331 icmp_param->data_len+icmp_param->head_len, 332 icmp_param->data_len+icmp_param->head_len,
332 icmp_param->head_len, 333 icmp_param->head_len,
333 ipc, rt, MSG_DONTWAIT) < 0) 334 ipc, rt, MSG_DONTWAIT) < 0) {
335 ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS);
334 ip_flush_pending_frames(sk); 336 ip_flush_pending_frames(sk);
335 else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { 337 } else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
336 struct icmphdr *icmph = icmp_hdr(skb); 338 struct icmphdr *icmph = icmp_hdr(skb);
337 __wsum csum = 0; 339 __wsum csum = 0;
338 struct sk_buff *skb1; 340 struct sk_buff *skb1;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 63bf298ca109..5fff865a4fa7 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -71,6 +71,7 @@
71 */ 71 */
72 72
73#include <linux/module.h> 73#include <linux/module.h>
74#include <linux/slab.h>
74#include <asm/uaccess.h> 75#include <asm/uaccess.h>
75#include <asm/system.h> 76#include <asm/system.h>
76#include <linux/types.h> 77#include <linux/types.h>
@@ -997,7 +998,7 @@ static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
997 --ANK 998 --ANK
998 */ 999 */
999 if (arp_mc_map(addr, buf, dev, 0) == 0) 1000 if (arp_mc_map(addr, buf, dev, 0) == 0)
1000 dev_mc_add(dev, buf, dev->addr_len, 0); 1001 dev_mc_add(dev, buf);
1001} 1002}
1002 1003
1003/* 1004/*
@@ -1010,7 +1011,7 @@ static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
1010 struct net_device *dev = in_dev->dev; 1011 struct net_device *dev = in_dev->dev;
1011 1012
1012 if (arp_mc_map(addr, buf, dev, 0) == 0) 1013 if (arp_mc_map(addr, buf, dev, 0) == 0)
1013 dev_mc_delete(dev, buf, dev->addr_len, 0); 1014 dev_mc_del(dev, buf);
1014} 1015}
1015 1016
1016#ifdef CONFIG_IP_MULTICAST 1017#ifdef CONFIG_IP_MULTICAST
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 1aaa8110d84b..e5fa2ddce320 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -14,6 +14,7 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/fcntl.h> 15#include <linux/fcntl.h>
16#include <linux/random.h> 16#include <linux/random.h>
17#include <linux/slab.h>
17#include <linux/cache.h> 18#include <linux/cache.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/time.h> 20#include <linux/time.h>
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index eaf3e2c8646a..a2ca6aed763b 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -19,6 +19,7 @@
19#include <linux/random.h> 19#include <linux/random.h>
20#include <linux/skbuff.h> 20#include <linux/skbuff.h>
21#include <linux/rtnetlink.h> 21#include <linux/rtnetlink.h>
22#include <linux/slab.h>
22 23
23#include <net/inet_frag.h> 24#include <net/inet_frag.h>
24 25
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index cc94cc2d8b2d..c5af909cf701 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/kmemcheck.h> 12#include <linux/kmemcheck.h>
13#include <linux/slab.h>
13#include <net/inet_hashtables.h> 14#include <net/inet_hashtables.h>
14#include <net/inet_timewait_sock.h> 15#include <net/inet_timewait_sock.h>
15#include <net/ip.h> 16#include <net/ip.h>
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 9f2cd47ceeb7..56cdf68a074c 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -25,6 +25,7 @@
25#include <linux/ip.h> 25#include <linux/ip.h>
26#include <linux/icmp.h> 26#include <linux/icmp.h>
27#include <linux/netdevice.h> 27#include <linux/netdevice.h>
28#include <linux/slab.h>
28#include <net/sock.h> 29#include <net/sock.h>
29#include <net/ip.h> 30#include <net/ip.h>
30#include <net/tcp.h> 31#include <net/tcp.h>
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index b59430bc041c..75347ea70ea0 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -32,6 +32,7 @@
32#include <linux/netdevice.h> 32#include <linux/netdevice.h>
33#include <linux/jhash.h> 33#include <linux/jhash.h>
34#include <linux/random.h> 34#include <linux/random.h>
35#include <linux/slab.h>
35#include <net/route.h> 36#include <net/route.h>
36#include <net/dst.h> 37#include <net/dst.h>
37#include <net/sock.h> 38#include <net/sock.h>
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f47c9f76754b..fe381d12ecdd 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -14,6 +14,7 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/slab.h>
17#include <asm/uaccess.h> 18#include <asm/uaccess.h>
18#include <linux/skbuff.h> 19#include <linux/skbuff.h>
19#include <linux/netdevice.h> 20#include <linux/netdevice.h>
@@ -810,11 +811,13 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
810 tunnel->err_count = 0; 811 tunnel->err_count = 0;
811 } 812 }
812 813
813 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen; 814 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->u.dst.header_len;
814 815
815 if (skb_headroom(skb) < max_headroom || skb_shared(skb)|| 816 if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
816 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { 817 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
817 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); 818 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
819 if (max_headroom > dev->needed_headroom)
820 dev->needed_headroom = max_headroom;
818 if (!new_skb) { 821 if (!new_skb) {
819 ip_rt_put(rt); 822 ip_rt_put(rt);
820 txq->tx_dropped++; 823 txq->tx_dropped++;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 091b5c7e04e1..af76de5f76de 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -119,6 +119,7 @@
119#include <linux/kernel.h> 119#include <linux/kernel.h>
120#include <linux/string.h> 120#include <linux/string.h>
121#include <linux/errno.h> 121#include <linux/errno.h>
122#include <linux/slab.h>
122 123
123#include <linux/net.h> 124#include <linux/net.h>
124#include <linux/socket.h> 125#include <linux/socket.h>
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 94bf105ef3c9..4c09a31fd140 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/capability.h> 12#include <linux/capability.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/slab.h>
14#include <linux/types.h> 15#include <linux/types.h>
15#include <asm/uaccess.h> 16#include <asm/uaccess.h>
16#include <linux/skbuff.h> 17#include <linux/skbuff.h>
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index f09135e1e14f..b0b2e3059f11 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -51,6 +51,7 @@
51#include <linux/string.h> 51#include <linux/string.h>
52#include <linux/errno.h> 52#include <linux/errno.h>
53#include <linux/highmem.h> 53#include <linux/highmem.h>
54#include <linux/slab.h>
54 55
55#include <linux/socket.h> 56#include <linux/socket.h>
56#include <linux/sockios.h> 57#include <linux/sockios.h>
@@ -310,7 +311,7 @@ int ip_output(struct sk_buff *skb)
310 !(IPCB(skb)->flags & IPSKB_REROUTED)); 311 !(IPCB(skb)->flags & IPSKB_REROUTED));
311} 312}
312 313
313int ip_queue_xmit(struct sk_buff *skb, int ipfragok) 314int ip_queue_xmit(struct sk_buff *skb)
314{ 315{
315 struct sock *sk = skb->sk; 316 struct sock *sk = skb->sk;
316 struct inet_sock *inet = inet_sk(sk); 317 struct inet_sock *inet = inet_sk(sk);
@@ -369,7 +370,7 @@ packet_routed:
369 skb_reset_network_header(skb); 370 skb_reset_network_header(skb);
370 iph = ip_hdr(skb); 371 iph = ip_hdr(skb);
371 *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); 372 *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
372 if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok) 373 if (ip_dont_fragment(sk, &rt->u.dst) && !skb->local_df)
373 iph->frag_off = htons(IP_DF); 374 iph->frag_off = htons(IP_DF);
374 else 375 else
375 iph->frag_off = 0; 376 iph->frag_off = 0;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 644dc43a55de..b0aa0546a3b3 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -23,6 +23,7 @@
23#include <linux/icmp.h> 23#include <linux/icmp.h>
24#include <linux/inetdevice.h> 24#include <linux/inetdevice.h>
25#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26#include <linux/slab.h>
26#include <net/sock.h> 27#include <net/sock.h>
27#include <net/ip.h> 28#include <net/ip.h>
28#include <net/icmp.h> 29#include <net/icmp.h>
@@ -286,12 +287,8 @@ int ip_ra_control(struct sock *sk, unsigned char on,
286void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, 287void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
287 __be16 port, u32 info, u8 *payload) 288 __be16 port, u32 info, u8 *payload)
288{ 289{
289 struct inet_sock *inet = inet_sk(sk);
290 struct sock_exterr_skb *serr; 290 struct sock_exterr_skb *serr;
291 291
292 if (!inet->recverr)
293 return;
294
295 skb = skb_clone(skb, GFP_ATOMIC); 292 skb = skb_clone(skb, GFP_ATOMIC);
296 if (!skb) 293 if (!skb)
297 return; 294 return;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 678909281648..b9d84e800cf4 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -53,6 +53,7 @@
53#include <linux/root_dev.h> 53#include <linux/root_dev.h>
54#include <linux/delay.h> 54#include <linux/delay.h>
55#include <linux/nfs_fs.h> 55#include <linux/nfs_fs.h>
56#include <linux/slab.h>
56#include <net/net_namespace.h> 57#include <net/net_namespace.h>
57#include <net/arp.h> 58#include <net/arp.h>
58#include <net/ip.h> 59#include <net/ip.h>
@@ -975,7 +976,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
975 /* Is it a reply for the device we are configuring? */ 976 /* Is it a reply for the device we are configuring? */
976 if (b->xid != ic_dev_xid) { 977 if (b->xid != ic_dev_xid) {
977 if (net_ratelimit()) 978 if (net_ratelimit())
978 printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet \n"); 979 printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet\n");
979 goto drop_unlock; 980 goto drop_unlock;
980 } 981 }
981 982
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 2f302d3ac9a3..0b27b14dcc9d 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -95,6 +95,7 @@
95#include <linux/module.h> 95#include <linux/module.h>
96#include <linux/types.h> 96#include <linux/types.h>
97#include <linux/kernel.h> 97#include <linux/kernel.h>
98#include <linux/slab.h>
98#include <asm/uaccess.h> 99#include <asm/uaccess.h>
99#include <linux/skbuff.h> 100#include <linux/skbuff.h>
100#include <linux/netdevice.h> 101#include <linux/netdevice.h>
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 1d42f6103c8d..1aa498d7a0a5 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -47,6 +47,7 @@
47#include <linux/mroute.h> 47#include <linux/mroute.h>
48#include <linux/init.h> 48#include <linux/init.h>
49#include <linux/if_ether.h> 49#include <linux/if_ether.h>
50#include <linux/slab.h>
50#include <net/net_namespace.h> 51#include <net/net_namespace.h>
51#include <net/ip.h> 52#include <net/ip.h>
52#include <net/protocol.h> 53#include <net/protocol.h>
@@ -62,11 +63,40 @@
62#include <net/ipip.h> 63#include <net/ipip.h>
63#include <net/checksum.h> 64#include <net/checksum.h>
64#include <net/netlink.h> 65#include <net/netlink.h>
66#include <net/fib_rules.h>
65 67
66#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) 68#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
67#define CONFIG_IP_PIMSM 1 69#define CONFIG_IP_PIMSM 1
68#endif 70#endif
69 71
72struct mr_table {
73 struct list_head list;
74#ifdef CONFIG_NET_NS
75 struct net *net;
76#endif
77 u32 id;
78 struct sock *mroute_sk;
79 struct timer_list ipmr_expire_timer;
80 struct list_head mfc_unres_queue;
81 struct list_head mfc_cache_array[MFC_LINES];
82 struct vif_device vif_table[MAXVIFS];
83 int maxvif;
84 atomic_t cache_resolve_queue_len;
85 int mroute_do_assert;
86 int mroute_do_pim;
87#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
88 int mroute_reg_vif_num;
89#endif
90};
91
92struct ipmr_rule {
93 struct fib_rule common;
94};
95
96struct ipmr_result {
97 struct mr_table *mrt;
98};
99
70/* Big lock, protecting vif table, mrt cache and mroute socket state. 100/* Big lock, protecting vif table, mrt cache and mroute socket state.
71 Note that the changes are semaphored via rtnl_lock. 101 Note that the changes are semaphored via rtnl_lock.
72 */ 102 */
@@ -77,9 +107,7 @@ static DEFINE_RWLOCK(mrt_lock);
77 * Multicast router control variables 107 * Multicast router control variables
78 */ 108 */
79 109
80#define VIF_EXISTS(_net, _idx) ((_net)->ipv4.vif_table[_idx].dev != NULL) 110#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
81
82static struct mfc_cache *mfc_unres_queue; /* Queue of unresolved entries */
83 111
84/* Special spinlock for queue of unresolved entries */ 112/* Special spinlock for queue of unresolved entries */
85static DEFINE_SPINLOCK(mfc_unres_lock); 113static DEFINE_SPINLOCK(mfc_unres_lock);
@@ -94,12 +122,215 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
94 122
95static struct kmem_cache *mrt_cachep __read_mostly; 123static struct kmem_cache *mrt_cachep __read_mostly;
96 124
97static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local); 125static struct mr_table *ipmr_new_table(struct net *net, u32 id);
98static int ipmr_cache_report(struct net *net, 126static int ip_mr_forward(struct net *net, struct mr_table *mrt,
127 struct sk_buff *skb, struct mfc_cache *cache,
128 int local);
129static int ipmr_cache_report(struct mr_table *mrt,
99 struct sk_buff *pkt, vifi_t vifi, int assert); 130 struct sk_buff *pkt, vifi_t vifi, int assert);
100static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm); 131static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
132 struct mfc_cache *c, struct rtmsg *rtm);
133static void ipmr_expire_process(unsigned long arg);
134
135#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
136#define ipmr_for_each_table(mrt, net) \
137 list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)
138
139static struct mr_table *ipmr_get_table(struct net *net, u32 id)
140{
141 struct mr_table *mrt;
142
143 ipmr_for_each_table(mrt, net) {
144 if (mrt->id == id)
145 return mrt;
146 }
147 return NULL;
148}
149
150static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
151 struct mr_table **mrt)
152{
153 struct ipmr_result res;
154 struct fib_lookup_arg arg = { .result = &res, };
155 int err;
156
157 err = fib_rules_lookup(net->ipv4.mr_rules_ops, flp, 0, &arg);
158 if (err < 0)
159 return err;
160 *mrt = res.mrt;
161 return 0;
162}
163
164static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
165 int flags, struct fib_lookup_arg *arg)
166{
167 struct ipmr_result *res = arg->result;
168 struct mr_table *mrt;
169
170 switch (rule->action) {
171 case FR_ACT_TO_TBL:
172 break;
173 case FR_ACT_UNREACHABLE:
174 return -ENETUNREACH;
175 case FR_ACT_PROHIBIT:
176 return -EACCES;
177 case FR_ACT_BLACKHOLE:
178 default:
179 return -EINVAL;
180 }
181
182 mrt = ipmr_get_table(rule->fr_net, rule->table);
183 if (mrt == NULL)
184 return -EAGAIN;
185 res->mrt = mrt;
186 return 0;
187}
188
189static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
190{
191 return 1;
192}
193
194static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
195 FRA_GENERIC_POLICY,
196};
197
198static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
199 struct fib_rule_hdr *frh, struct nlattr **tb)
200{
201 return 0;
202}
203
204static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
205 struct nlattr **tb)
206{
207 return 1;
208}
209
210static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
211 struct fib_rule_hdr *frh)
212{
213 frh->dst_len = 0;
214 frh->src_len = 0;
215 frh->tos = 0;
216 return 0;
217}
218
219static struct fib_rules_ops ipmr_rules_ops_template = {
220 .family = FIB_RULES_IPMR,
221 .rule_size = sizeof(struct ipmr_rule),
222 .addr_size = sizeof(u32),
223 .action = ipmr_rule_action,
224 .match = ipmr_rule_match,
225 .configure = ipmr_rule_configure,
226 .compare = ipmr_rule_compare,
227 .default_pref = fib_default_rule_pref,
228 .fill = ipmr_rule_fill,
229 .nlgroup = RTNLGRP_IPV4_RULE,
230 .policy = ipmr_rule_policy,
231 .owner = THIS_MODULE,
232};
233
234static int __net_init ipmr_rules_init(struct net *net)
235{
236 struct fib_rules_ops *ops;
237 struct mr_table *mrt;
238 int err;
239
240 ops = fib_rules_register(&ipmr_rules_ops_template, net);
241 if (IS_ERR(ops))
242 return PTR_ERR(ops);
243
244 INIT_LIST_HEAD(&net->ipv4.mr_tables);
245
246 mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
247 if (mrt == NULL) {
248 err = -ENOMEM;
249 goto err1;
250 }
251
252 err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
253 if (err < 0)
254 goto err2;
255
256 net->ipv4.mr_rules_ops = ops;
257 return 0;
258
259err2:
260 kfree(mrt);
261err1:
262 fib_rules_unregister(ops);
263 return err;
264}
265
266static void __net_exit ipmr_rules_exit(struct net *net)
267{
268 struct mr_table *mrt, *next;
269
270 list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list)
271 kfree(mrt);
272 fib_rules_unregister(net->ipv4.mr_rules_ops);
273}
274#else
275#define ipmr_for_each_table(mrt, net) \
276 for (mrt = net->ipv4.mrt; mrt; mrt = NULL)
277
278static struct mr_table *ipmr_get_table(struct net *net, u32 id)
279{
280 return net->ipv4.mrt;
281}
282
283static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
284 struct mr_table **mrt)
285{
286 *mrt = net->ipv4.mrt;
287 return 0;
288}
289
290static int __net_init ipmr_rules_init(struct net *net)
291{
292 net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
293 return net->ipv4.mrt ? 0 : -ENOMEM;
294}
295
296static void __net_exit ipmr_rules_exit(struct net *net)
297{
298 kfree(net->ipv4.mrt);
299}
300#endif
301
302static struct mr_table *ipmr_new_table(struct net *net, u32 id)
303{
304 struct mr_table *mrt;
305 unsigned int i;
101 306
102static struct timer_list ipmr_expire_timer; 307 mrt = ipmr_get_table(net, id);
308 if (mrt != NULL)
309 return mrt;
310
311 mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
312 if (mrt == NULL)
313 return NULL;
314 write_pnet(&mrt->net, net);
315 mrt->id = id;
316
317 /* Forwarding cache */
318 for (i = 0; i < MFC_LINES; i++)
319 INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);
320
321 INIT_LIST_HEAD(&mrt->mfc_unres_queue);
322
323 setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
324 (unsigned long)mrt);
325
326#ifdef CONFIG_IP_PIMSM
327 mrt->mroute_reg_vif_num = -1;
328#endif
329#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
330 list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
331#endif
332 return mrt;
333}
103 334
104/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ 335/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
105 336
@@ -200,12 +431,22 @@ failure:
200static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) 431static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
201{ 432{
202 struct net *net = dev_net(dev); 433 struct net *net = dev_net(dev);
434 struct mr_table *mrt;
435 struct flowi fl = {
436 .oif = dev->ifindex,
437 .iif = skb->skb_iif,
438 .mark = skb->mark,
439 };
440 int err;
441
442 err = ipmr_fib_lookup(net, &fl, &mrt);
443 if (err < 0)
444 return err;
203 445
204 read_lock(&mrt_lock); 446 read_lock(&mrt_lock);
205 dev->stats.tx_bytes += skb->len; 447 dev->stats.tx_bytes += skb->len;
206 dev->stats.tx_packets++; 448 dev->stats.tx_packets++;
207 ipmr_cache_report(net, skb, net->ipv4.mroute_reg_vif_num, 449 ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
208 IGMPMSG_WHOLEPKT);
209 read_unlock(&mrt_lock); 450 read_unlock(&mrt_lock);
210 kfree_skb(skb); 451 kfree_skb(skb);
211 return NETDEV_TX_OK; 452 return NETDEV_TX_OK;
@@ -225,12 +466,18 @@ static void reg_vif_setup(struct net_device *dev)
225 dev->features |= NETIF_F_NETNS_LOCAL; 466 dev->features |= NETIF_F_NETNS_LOCAL;
226} 467}
227 468
228static struct net_device *ipmr_reg_vif(struct net *net) 469static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
229{ 470{
230 struct net_device *dev; 471 struct net_device *dev;
231 struct in_device *in_dev; 472 struct in_device *in_dev;
473 char name[IFNAMSIZ];
232 474
233 dev = alloc_netdev(0, "pimreg", reg_vif_setup); 475 if (mrt->id == RT_TABLE_DEFAULT)
476 sprintf(name, "pimreg");
477 else
478 sprintf(name, "pimreg%u", mrt->id);
479
480 dev = alloc_netdev(0, name, reg_vif_setup);
234 481
235 if (dev == NULL) 482 if (dev == NULL)
236 return NULL; 483 return NULL;
@@ -275,17 +522,17 @@ failure:
275 * @notify: Set to 1, if the caller is a notifier_call 522 * @notify: Set to 1, if the caller is a notifier_call
276 */ 523 */
277 524
278static int vif_delete(struct net *net, int vifi, int notify, 525static int vif_delete(struct mr_table *mrt, int vifi, int notify,
279 struct list_head *head) 526 struct list_head *head)
280{ 527{
281 struct vif_device *v; 528 struct vif_device *v;
282 struct net_device *dev; 529 struct net_device *dev;
283 struct in_device *in_dev; 530 struct in_device *in_dev;
284 531
285 if (vifi < 0 || vifi >= net->ipv4.maxvif) 532 if (vifi < 0 || vifi >= mrt->maxvif)
286 return -EADDRNOTAVAIL; 533 return -EADDRNOTAVAIL;
287 534
288 v = &net->ipv4.vif_table[vifi]; 535 v = &mrt->vif_table[vifi];
289 536
290 write_lock_bh(&mrt_lock); 537 write_lock_bh(&mrt_lock);
291 dev = v->dev; 538 dev = v->dev;
@@ -297,17 +544,17 @@ static int vif_delete(struct net *net, int vifi, int notify,
297 } 544 }
298 545
299#ifdef CONFIG_IP_PIMSM 546#ifdef CONFIG_IP_PIMSM
300 if (vifi == net->ipv4.mroute_reg_vif_num) 547 if (vifi == mrt->mroute_reg_vif_num)
301 net->ipv4.mroute_reg_vif_num = -1; 548 mrt->mroute_reg_vif_num = -1;
302#endif 549#endif
303 550
304 if (vifi+1 == net->ipv4.maxvif) { 551 if (vifi+1 == mrt->maxvif) {
305 int tmp; 552 int tmp;
306 for (tmp=vifi-1; tmp>=0; tmp--) { 553 for (tmp=vifi-1; tmp>=0; tmp--) {
307 if (VIF_EXISTS(net, tmp)) 554 if (VIF_EXISTS(mrt, tmp))
308 break; 555 break;
309 } 556 }
310 net->ipv4.maxvif = tmp+1; 557 mrt->maxvif = tmp+1;
311 } 558 }
312 559
313 write_unlock_bh(&mrt_lock); 560 write_unlock_bh(&mrt_lock);
@@ -328,7 +575,6 @@ static int vif_delete(struct net *net, int vifi, int notify,
328 575
329static inline void ipmr_cache_free(struct mfc_cache *c) 576static inline void ipmr_cache_free(struct mfc_cache *c)
330{ 577{
331 release_net(mfc_net(c));
332 kmem_cache_free(mrt_cachep, c); 578 kmem_cache_free(mrt_cachep, c);
333} 579}
334 580
@@ -336,13 +582,13 @@ static inline void ipmr_cache_free(struct mfc_cache *c)
336 and reporting error to netlink readers. 582 and reporting error to netlink readers.
337 */ 583 */
338 584
339static void ipmr_destroy_unres(struct mfc_cache *c) 585static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
340{ 586{
587 struct net *net = read_pnet(&mrt->net);
341 struct sk_buff *skb; 588 struct sk_buff *skb;
342 struct nlmsgerr *e; 589 struct nlmsgerr *e;
343 struct net *net = mfc_net(c);
344 590
345 atomic_dec(&net->ipv4.cache_resolve_queue_len); 591 atomic_dec(&mrt->cache_resolve_queue_len);
346 592
347 while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) { 593 while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
348 if (ip_hdr(skb)->version == 0) { 594 if (ip_hdr(skb)->version == 0) {
@@ -363,42 +609,40 @@ static void ipmr_destroy_unres(struct mfc_cache *c)
363} 609}
364 610
365 611
366/* Single timer process for all the unresolved queue. */ 612/* Timer process for the unresolved queue. */
367 613
368static void ipmr_expire_process(unsigned long dummy) 614static void ipmr_expire_process(unsigned long arg)
369{ 615{
616 struct mr_table *mrt = (struct mr_table *)arg;
370 unsigned long now; 617 unsigned long now;
371 unsigned long expires; 618 unsigned long expires;
372 struct mfc_cache *c, **cp; 619 struct mfc_cache *c, *next;
373 620
374 if (!spin_trylock(&mfc_unres_lock)) { 621 if (!spin_trylock(&mfc_unres_lock)) {
375 mod_timer(&ipmr_expire_timer, jiffies+HZ/10); 622 mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
376 return; 623 return;
377 } 624 }
378 625
379 if (mfc_unres_queue == NULL) 626 if (list_empty(&mrt->mfc_unres_queue))
380 goto out; 627 goto out;
381 628
382 now = jiffies; 629 now = jiffies;
383 expires = 10*HZ; 630 expires = 10*HZ;
384 cp = &mfc_unres_queue;
385 631
386 while ((c=*cp) != NULL) { 632 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
387 if (time_after(c->mfc_un.unres.expires, now)) { 633 if (time_after(c->mfc_un.unres.expires, now)) {
388 unsigned long interval = c->mfc_un.unres.expires - now; 634 unsigned long interval = c->mfc_un.unres.expires - now;
389 if (interval < expires) 635 if (interval < expires)
390 expires = interval; 636 expires = interval;
391 cp = &c->next;
392 continue; 637 continue;
393 } 638 }
394 639
395 *cp = c->next; 640 list_del(&c->list);
396 641 ipmr_destroy_unres(mrt, c);
397 ipmr_destroy_unres(c);
398 } 642 }
399 643
400 if (mfc_unres_queue != NULL) 644 if (!list_empty(&mrt->mfc_unres_queue))
401 mod_timer(&ipmr_expire_timer, jiffies + expires); 645 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
402 646
403out: 647out:
404 spin_unlock(&mfc_unres_lock); 648 spin_unlock(&mfc_unres_lock);
@@ -406,17 +650,17 @@ out:
406 650
407/* Fill oifs list. It is called under write locked mrt_lock. */ 651/* Fill oifs list. It is called under write locked mrt_lock. */
408 652
409static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls) 653static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
654 unsigned char *ttls)
410{ 655{
411 int vifi; 656 int vifi;
412 struct net *net = mfc_net(cache);
413 657
414 cache->mfc_un.res.minvif = MAXVIFS; 658 cache->mfc_un.res.minvif = MAXVIFS;
415 cache->mfc_un.res.maxvif = 0; 659 cache->mfc_un.res.maxvif = 0;
416 memset(cache->mfc_un.res.ttls, 255, MAXVIFS); 660 memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
417 661
418 for (vifi = 0; vifi < net->ipv4.maxvif; vifi++) { 662 for (vifi = 0; vifi < mrt->maxvif; vifi++) {
419 if (VIF_EXISTS(net, vifi) && 663 if (VIF_EXISTS(mrt, vifi) &&
420 ttls[vifi] && ttls[vifi] < 255) { 664 ttls[vifi] && ttls[vifi] < 255) {
421 cache->mfc_un.res.ttls[vifi] = ttls[vifi]; 665 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
422 if (cache->mfc_un.res.minvif > vifi) 666 if (cache->mfc_un.res.minvif > vifi)
@@ -427,16 +671,17 @@ static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
427 } 671 }
428} 672}
429 673
430static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) 674static int vif_add(struct net *net, struct mr_table *mrt,
675 struct vifctl *vifc, int mrtsock)
431{ 676{
432 int vifi = vifc->vifc_vifi; 677 int vifi = vifc->vifc_vifi;
433 struct vif_device *v = &net->ipv4.vif_table[vifi]; 678 struct vif_device *v = &mrt->vif_table[vifi];
434 struct net_device *dev; 679 struct net_device *dev;
435 struct in_device *in_dev; 680 struct in_device *in_dev;
436 int err; 681 int err;
437 682
438 /* Is vif busy ? */ 683 /* Is vif busy ? */
439 if (VIF_EXISTS(net, vifi)) 684 if (VIF_EXISTS(mrt, vifi))
440 return -EADDRINUSE; 685 return -EADDRINUSE;
441 686
442 switch (vifc->vifc_flags) { 687 switch (vifc->vifc_flags) {
@@ -446,9 +691,9 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
446 * Special Purpose VIF in PIM 691 * Special Purpose VIF in PIM
447 * All the packets will be sent to the daemon 692 * All the packets will be sent to the daemon
448 */ 693 */
449 if (net->ipv4.mroute_reg_vif_num >= 0) 694 if (mrt->mroute_reg_vif_num >= 0)
450 return -EADDRINUSE; 695 return -EADDRINUSE;
451 dev = ipmr_reg_vif(net); 696 dev = ipmr_reg_vif(net, mrt);
452 if (!dev) 697 if (!dev)
453 return -ENOBUFS; 698 return -ENOBUFS;
454 err = dev_set_allmulti(dev, 1); 699 err = dev_set_allmulti(dev, 1);
@@ -524,49 +769,47 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
524 v->dev = dev; 769 v->dev = dev;
525#ifdef CONFIG_IP_PIMSM 770#ifdef CONFIG_IP_PIMSM
526 if (v->flags&VIFF_REGISTER) 771 if (v->flags&VIFF_REGISTER)
527 net->ipv4.mroute_reg_vif_num = vifi; 772 mrt->mroute_reg_vif_num = vifi;
528#endif 773#endif
529 if (vifi+1 > net->ipv4.maxvif) 774 if (vifi+1 > mrt->maxvif)
530 net->ipv4.maxvif = vifi+1; 775 mrt->maxvif = vifi+1;
531 write_unlock_bh(&mrt_lock); 776 write_unlock_bh(&mrt_lock);
532 return 0; 777 return 0;
533} 778}
534 779
535static struct mfc_cache *ipmr_cache_find(struct net *net, 780static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
536 __be32 origin, 781 __be32 origin,
537 __be32 mcastgrp) 782 __be32 mcastgrp)
538{ 783{
539 int line = MFC_HASH(mcastgrp, origin); 784 int line = MFC_HASH(mcastgrp, origin);
540 struct mfc_cache *c; 785 struct mfc_cache *c;
541 786
542 for (c = net->ipv4.mfc_cache_array[line]; c; c = c->next) { 787 list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
543 if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp) 788 if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
544 break; 789 return c;
545 } 790 }
546 return c; 791 return NULL;
547} 792}
548 793
549/* 794/*
550 * Allocate a multicast cache entry 795 * Allocate a multicast cache entry
551 */ 796 */
552static struct mfc_cache *ipmr_cache_alloc(struct net *net) 797static struct mfc_cache *ipmr_cache_alloc(void)
553{ 798{
554 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); 799 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
555 if (c == NULL) 800 if (c == NULL)
556 return NULL; 801 return NULL;
557 c->mfc_un.res.minvif = MAXVIFS; 802 c->mfc_un.res.minvif = MAXVIFS;
558 mfc_net_set(c, net);
559 return c; 803 return c;
560} 804}
561 805
562static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net) 806static struct mfc_cache *ipmr_cache_alloc_unres(void)
563{ 807{
564 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); 808 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
565 if (c == NULL) 809 if (c == NULL)
566 return NULL; 810 return NULL;
567 skb_queue_head_init(&c->mfc_un.unres.unresolved); 811 skb_queue_head_init(&c->mfc_un.unres.unresolved);
568 c->mfc_un.unres.expires = jiffies + 10*HZ; 812 c->mfc_un.unres.expires = jiffies + 10*HZ;
569 mfc_net_set(c, net);
570 return c; 813 return c;
571} 814}
572 815
@@ -574,7 +817,8 @@ static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net)
574 * A cache entry has gone into a resolved state from queued 817 * A cache entry has gone into a resolved state from queued
575 */ 818 */
576 819
577static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) 820static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
821 struct mfc_cache *uc, struct mfc_cache *c)
578{ 822{
579 struct sk_buff *skb; 823 struct sk_buff *skb;
580 struct nlmsgerr *e; 824 struct nlmsgerr *e;
@@ -587,7 +831,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
587 if (ip_hdr(skb)->version == 0) { 831 if (ip_hdr(skb)->version == 0) {
588 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); 832 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
589 833
590 if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) { 834 if (ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
591 nlh->nlmsg_len = (skb_tail_pointer(skb) - 835 nlh->nlmsg_len = (skb_tail_pointer(skb) -
592 (u8 *)nlh); 836 (u8 *)nlh);
593 } else { 837 } else {
@@ -599,9 +843,9 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
599 memset(&e->msg, 0, sizeof(e->msg)); 843 memset(&e->msg, 0, sizeof(e->msg));
600 } 844 }
601 845
602 rtnl_unicast(skb, mfc_net(c), NETLINK_CB(skb).pid); 846 rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
603 } else 847 } else
604 ip_mr_forward(skb, c, 0); 848 ip_mr_forward(net, mrt, skb, c, 0);
605 } 849 }
606} 850}
607 851
@@ -612,7 +856,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
612 * Called under mrt_lock. 856 * Called under mrt_lock.
613 */ 857 */
614 858
615static int ipmr_cache_report(struct net *net, 859static int ipmr_cache_report(struct mr_table *mrt,
616 struct sk_buff *pkt, vifi_t vifi, int assert) 860 struct sk_buff *pkt, vifi_t vifi, int assert)
617{ 861{
618 struct sk_buff *skb; 862 struct sk_buff *skb;
@@ -645,7 +889,7 @@ static int ipmr_cache_report(struct net *net,
645 memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); 889 memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
646 msg->im_msgtype = IGMPMSG_WHOLEPKT; 890 msg->im_msgtype = IGMPMSG_WHOLEPKT;
647 msg->im_mbz = 0; 891 msg->im_mbz = 0;
648 msg->im_vif = net->ipv4.mroute_reg_vif_num; 892 msg->im_vif = mrt->mroute_reg_vif_num;
649 ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; 893 ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
650 ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + 894 ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
651 sizeof(struct iphdr)); 895 sizeof(struct iphdr));
@@ -677,7 +921,7 @@ static int ipmr_cache_report(struct net *net,
677 skb->transport_header = skb->network_header; 921 skb->transport_header = skb->network_header;
678 } 922 }
679 923
680 if (net->ipv4.mroute_sk == NULL) { 924 if (mrt->mroute_sk == NULL) {
681 kfree_skb(skb); 925 kfree_skb(skb);
682 return -EINVAL; 926 return -EINVAL;
683 } 927 }
@@ -685,7 +929,7 @@ static int ipmr_cache_report(struct net *net,
685 /* 929 /*
686 * Deliver to mrouted 930 * Deliver to mrouted
687 */ 931 */
688 ret = sock_queue_rcv_skb(net->ipv4.mroute_sk, skb); 932 ret = sock_queue_rcv_skb(mrt->mroute_sk, skb);
689 if (ret < 0) { 933 if (ret < 0) {
690 if (net_ratelimit()) 934 if (net_ratelimit())
691 printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n"); 935 printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
@@ -700,27 +944,29 @@ static int ipmr_cache_report(struct net *net,
700 */ 944 */
701 945
702static int 946static int
703ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) 947ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
704{ 948{
949 bool found = false;
705 int err; 950 int err;
706 struct mfc_cache *c; 951 struct mfc_cache *c;
707 const struct iphdr *iph = ip_hdr(skb); 952 const struct iphdr *iph = ip_hdr(skb);
708 953
709 spin_lock_bh(&mfc_unres_lock); 954 spin_lock_bh(&mfc_unres_lock);
710 for (c=mfc_unres_queue; c; c=c->next) { 955 list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
711 if (net_eq(mfc_net(c), net) && 956 if (c->mfc_mcastgrp == iph->daddr &&
712 c->mfc_mcastgrp == iph->daddr && 957 c->mfc_origin == iph->saddr) {
713 c->mfc_origin == iph->saddr) 958 found = true;
714 break; 959 break;
960 }
715 } 961 }
716 962
717 if (c == NULL) { 963 if (!found) {
718 /* 964 /*
719 * Create a new entry if allowable 965 * Create a new entry if allowable
720 */ 966 */
721 967
722 if (atomic_read(&net->ipv4.cache_resolve_queue_len) >= 10 || 968 if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
723 (c = ipmr_cache_alloc_unres(net)) == NULL) { 969 (c = ipmr_cache_alloc_unres()) == NULL) {
724 spin_unlock_bh(&mfc_unres_lock); 970 spin_unlock_bh(&mfc_unres_lock);
725 971
726 kfree_skb(skb); 972 kfree_skb(skb);
@@ -737,7 +983,7 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
737 /* 983 /*
738 * Reflect first query at mrouted. 984 * Reflect first query at mrouted.
739 */ 985 */
740 err = ipmr_cache_report(net, skb, vifi, IGMPMSG_NOCACHE); 986 err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
741 if (err < 0) { 987 if (err < 0) {
742 /* If the report failed throw the cache entry 988 /* If the report failed throw the cache entry
743 out - Brad Parker 989 out - Brad Parker
@@ -749,11 +995,10 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
749 return err; 995 return err;
750 } 996 }
751 997
752 atomic_inc(&net->ipv4.cache_resolve_queue_len); 998 atomic_inc(&mrt->cache_resolve_queue_len);
753 c->next = mfc_unres_queue; 999 list_add(&c->list, &mrt->mfc_unres_queue);
754 mfc_unres_queue = c;
755 1000
756 mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires); 1001 mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
757 } 1002 }
758 1003
759 /* 1004 /*
@@ -775,19 +1020,18 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
775 * MFC cache manipulation by user space mroute daemon 1020 * MFC cache manipulation by user space mroute daemon
776 */ 1021 */
777 1022
778static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc) 1023static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
779{ 1024{
780 int line; 1025 int line;
781 struct mfc_cache *c, **cp; 1026 struct mfc_cache *c, *next;
782 1027
783 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); 1028 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
784 1029
785 for (cp = &net->ipv4.mfc_cache_array[line]; 1030 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
786 (c = *cp) != NULL; cp = &c->next) {
787 if (c->mfc_origin == mfc->mfcc_origin.s_addr && 1031 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
788 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { 1032 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
789 write_lock_bh(&mrt_lock); 1033 write_lock_bh(&mrt_lock);
790 *cp = c->next; 1034 list_del(&c->list);
791 write_unlock_bh(&mrt_lock); 1035 write_unlock_bh(&mrt_lock);
792 1036
793 ipmr_cache_free(c); 1037 ipmr_cache_free(c);
@@ -797,24 +1041,30 @@ static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc)
797 return -ENOENT; 1041 return -ENOENT;
798} 1042}
799 1043
800static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) 1044static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
1045 struct mfcctl *mfc, int mrtsock)
801{ 1046{
1047 bool found = false;
802 int line; 1048 int line;
803 struct mfc_cache *uc, *c, **cp; 1049 struct mfc_cache *uc, *c;
1050
1051 if (mfc->mfcc_parent >= MAXVIFS)
1052 return -ENFILE;
804 1053
805 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); 1054 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
806 1055
807 for (cp = &net->ipv4.mfc_cache_array[line]; 1056 list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
808 (c = *cp) != NULL; cp = &c->next) {
809 if (c->mfc_origin == mfc->mfcc_origin.s_addr && 1057 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
810 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) 1058 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
1059 found = true;
811 break; 1060 break;
1061 }
812 } 1062 }
813 1063
814 if (c != NULL) { 1064 if (found) {
815 write_lock_bh(&mrt_lock); 1065 write_lock_bh(&mrt_lock);
816 c->mfc_parent = mfc->mfcc_parent; 1066 c->mfc_parent = mfc->mfcc_parent;
817 ipmr_update_thresholds(c, mfc->mfcc_ttls); 1067 ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
818 if (!mrtsock) 1068 if (!mrtsock)
819 c->mfc_flags |= MFC_STATIC; 1069 c->mfc_flags |= MFC_STATIC;
820 write_unlock_bh(&mrt_lock); 1070 write_unlock_bh(&mrt_lock);
@@ -824,43 +1074,42 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
824 if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) 1074 if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
825 return -EINVAL; 1075 return -EINVAL;
826 1076
827 c = ipmr_cache_alloc(net); 1077 c = ipmr_cache_alloc();
828 if (c == NULL) 1078 if (c == NULL)
829 return -ENOMEM; 1079 return -ENOMEM;
830 1080
831 c->mfc_origin = mfc->mfcc_origin.s_addr; 1081 c->mfc_origin = mfc->mfcc_origin.s_addr;
832 c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr; 1082 c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
833 c->mfc_parent = mfc->mfcc_parent; 1083 c->mfc_parent = mfc->mfcc_parent;
834 ipmr_update_thresholds(c, mfc->mfcc_ttls); 1084 ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
835 if (!mrtsock) 1085 if (!mrtsock)
836 c->mfc_flags |= MFC_STATIC; 1086 c->mfc_flags |= MFC_STATIC;
837 1087
838 write_lock_bh(&mrt_lock); 1088 write_lock_bh(&mrt_lock);
839 c->next = net->ipv4.mfc_cache_array[line]; 1089 list_add(&c->list, &mrt->mfc_cache_array[line]);
840 net->ipv4.mfc_cache_array[line] = c;
841 write_unlock_bh(&mrt_lock); 1090 write_unlock_bh(&mrt_lock);
842 1091
843 /* 1092 /*
844 * Check to see if we resolved a queued list. If so we 1093 * Check to see if we resolved a queued list. If so we
845 * need to send on the frames and tidy up. 1094 * need to send on the frames and tidy up.
846 */ 1095 */
1096 found = false;
847 spin_lock_bh(&mfc_unres_lock); 1097 spin_lock_bh(&mfc_unres_lock);
848 for (cp = &mfc_unres_queue; (uc=*cp) != NULL; 1098 list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
849 cp = &uc->next) { 1099 if (uc->mfc_origin == c->mfc_origin &&
850 if (net_eq(mfc_net(uc), net) &&
851 uc->mfc_origin == c->mfc_origin &&
852 uc->mfc_mcastgrp == c->mfc_mcastgrp) { 1100 uc->mfc_mcastgrp == c->mfc_mcastgrp) {
853 *cp = uc->next; 1101 list_del(&uc->list);
854 atomic_dec(&net->ipv4.cache_resolve_queue_len); 1102 atomic_dec(&mrt->cache_resolve_queue_len);
1103 found = true;
855 break; 1104 break;
856 } 1105 }
857 } 1106 }
858 if (mfc_unres_queue == NULL) 1107 if (list_empty(&mrt->mfc_unres_queue))
859 del_timer(&ipmr_expire_timer); 1108 del_timer(&mrt->ipmr_expire_timer);
860 spin_unlock_bh(&mfc_unres_lock); 1109 spin_unlock_bh(&mfc_unres_lock);
861 1110
862 if (uc) { 1111 if (found) {
863 ipmr_cache_resolve(uc, c); 1112 ipmr_cache_resolve(net, mrt, uc, c);
864 ipmr_cache_free(uc); 1113 ipmr_cache_free(uc);
865 } 1114 }
866 return 0; 1115 return 0;
@@ -870,53 +1119,41 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
870 * Close the multicast socket, and clear the vif tables etc 1119 * Close the multicast socket, and clear the vif tables etc
871 */ 1120 */
872 1121
873static void mroute_clean_tables(struct net *net) 1122static void mroute_clean_tables(struct mr_table *mrt)
874{ 1123{
875 int i; 1124 int i;
876 LIST_HEAD(list); 1125 LIST_HEAD(list);
1126 struct mfc_cache *c, *next;
877 1127
878 /* 1128 /*
879 * Shut down all active vif entries 1129 * Shut down all active vif entries
880 */ 1130 */
881 for (i = 0; i < net->ipv4.maxvif; i++) { 1131 for (i = 0; i < mrt->maxvif; i++) {
882 if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC)) 1132 if (!(mrt->vif_table[i].flags&VIFF_STATIC))
883 vif_delete(net, i, 0, &list); 1133 vif_delete(mrt, i, 0, &list);
884 } 1134 }
885 unregister_netdevice_many(&list); 1135 unregister_netdevice_many(&list);
886 1136
887 /* 1137 /*
888 * Wipe the cache 1138 * Wipe the cache
889 */ 1139 */
890 for (i=0; i<MFC_LINES; i++) { 1140 for (i = 0; i < MFC_LINES; i++) {
891 struct mfc_cache *c, **cp; 1141 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
892 1142 if (c->mfc_flags&MFC_STATIC)
893 cp = &net->ipv4.mfc_cache_array[i];
894 while ((c = *cp) != NULL) {
895 if (c->mfc_flags&MFC_STATIC) {
896 cp = &c->next;
897 continue; 1143 continue;
898 }
899 write_lock_bh(&mrt_lock); 1144 write_lock_bh(&mrt_lock);
900 *cp = c->next; 1145 list_del(&c->list);
901 write_unlock_bh(&mrt_lock); 1146 write_unlock_bh(&mrt_lock);
902 1147
903 ipmr_cache_free(c); 1148 ipmr_cache_free(c);
904 } 1149 }
905 } 1150 }
906 1151
907 if (atomic_read(&net->ipv4.cache_resolve_queue_len) != 0) { 1152 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
908 struct mfc_cache *c, **cp;
909
910 spin_lock_bh(&mfc_unres_lock); 1153 spin_lock_bh(&mfc_unres_lock);
911 cp = &mfc_unres_queue; 1154 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
912 while ((c = *cp) != NULL) { 1155 list_del(&c->list);
913 if (!net_eq(mfc_net(c), net)) { 1156 ipmr_destroy_unres(mrt, c);
914 cp = &c->next;
915 continue;
916 }
917 *cp = c->next;
918
919 ipmr_destroy_unres(c);
920 } 1157 }
921 spin_unlock_bh(&mfc_unres_lock); 1158 spin_unlock_bh(&mfc_unres_lock);
922 } 1159 }
@@ -925,16 +1162,19 @@ static void mroute_clean_tables(struct net *net)
925static void mrtsock_destruct(struct sock *sk) 1162static void mrtsock_destruct(struct sock *sk)
926{ 1163{
927 struct net *net = sock_net(sk); 1164 struct net *net = sock_net(sk);
1165 struct mr_table *mrt;
928 1166
929 rtnl_lock(); 1167 rtnl_lock();
930 if (sk == net->ipv4.mroute_sk) { 1168 ipmr_for_each_table(mrt, net) {
931 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; 1169 if (sk == mrt->mroute_sk) {
1170 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
932 1171
933 write_lock_bh(&mrt_lock); 1172 write_lock_bh(&mrt_lock);
934 net->ipv4.mroute_sk = NULL; 1173 mrt->mroute_sk = NULL;
935 write_unlock_bh(&mrt_lock); 1174 write_unlock_bh(&mrt_lock);
936 1175
937 mroute_clean_tables(net); 1176 mroute_clean_tables(mrt);
1177 }
938 } 1178 }
939 rtnl_unlock(); 1179 rtnl_unlock();
940} 1180}
@@ -952,9 +1192,14 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
952 struct vifctl vif; 1192 struct vifctl vif;
953 struct mfcctl mfc; 1193 struct mfcctl mfc;
954 struct net *net = sock_net(sk); 1194 struct net *net = sock_net(sk);
1195 struct mr_table *mrt;
1196
1197 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1198 if (mrt == NULL)
1199 return -ENOENT;
955 1200
956 if (optname != MRT_INIT) { 1201 if (optname != MRT_INIT) {
957 if (sk != net->ipv4.mroute_sk && !capable(CAP_NET_ADMIN)) 1202 if (sk != mrt->mroute_sk && !capable(CAP_NET_ADMIN))
958 return -EACCES; 1203 return -EACCES;
959 } 1204 }
960 1205
@@ -967,7 +1212,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
967 return -ENOPROTOOPT; 1212 return -ENOPROTOOPT;
968 1213
969 rtnl_lock(); 1214 rtnl_lock();
970 if (net->ipv4.mroute_sk) { 1215 if (mrt->mroute_sk) {
971 rtnl_unlock(); 1216 rtnl_unlock();
972 return -EADDRINUSE; 1217 return -EADDRINUSE;
973 } 1218 }
@@ -975,7 +1220,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
975 ret = ip_ra_control(sk, 1, mrtsock_destruct); 1220 ret = ip_ra_control(sk, 1, mrtsock_destruct);
976 if (ret == 0) { 1221 if (ret == 0) {
977 write_lock_bh(&mrt_lock); 1222 write_lock_bh(&mrt_lock);
978 net->ipv4.mroute_sk = sk; 1223 mrt->mroute_sk = sk;
979 write_unlock_bh(&mrt_lock); 1224 write_unlock_bh(&mrt_lock);
980 1225
981 IPV4_DEVCONF_ALL(net, MC_FORWARDING)++; 1226 IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
@@ -983,7 +1228,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
983 rtnl_unlock(); 1228 rtnl_unlock();
984 return ret; 1229 return ret;
985 case MRT_DONE: 1230 case MRT_DONE:
986 if (sk != net->ipv4.mroute_sk) 1231 if (sk != mrt->mroute_sk)
987 return -EACCES; 1232 return -EACCES;
988 return ip_ra_control(sk, 0, NULL); 1233 return ip_ra_control(sk, 0, NULL);
989 case MRT_ADD_VIF: 1234 case MRT_ADD_VIF:
@@ -996,9 +1241,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
996 return -ENFILE; 1241 return -ENFILE;
997 rtnl_lock(); 1242 rtnl_lock();
998 if (optname == MRT_ADD_VIF) { 1243 if (optname == MRT_ADD_VIF) {
999 ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk); 1244 ret = vif_add(net, mrt, &vif, sk == mrt->mroute_sk);
1000 } else { 1245 } else {
1001 ret = vif_delete(net, vif.vifc_vifi, 0, NULL); 1246 ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
1002 } 1247 }
1003 rtnl_unlock(); 1248 rtnl_unlock();
1004 return ret; 1249 return ret;
@@ -1015,9 +1260,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
1015 return -EFAULT; 1260 return -EFAULT;
1016 rtnl_lock(); 1261 rtnl_lock();
1017 if (optname == MRT_DEL_MFC) 1262 if (optname == MRT_DEL_MFC)
1018 ret = ipmr_mfc_delete(net, &mfc); 1263 ret = ipmr_mfc_delete(mrt, &mfc);
1019 else 1264 else
1020 ret = ipmr_mfc_add(net, &mfc, sk == net->ipv4.mroute_sk); 1265 ret = ipmr_mfc_add(net, mrt, &mfc, sk == mrt->mroute_sk);
1021 rtnl_unlock(); 1266 rtnl_unlock();
1022 return ret; 1267 return ret;
1023 /* 1268 /*
@@ -1028,7 +1273,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
1028 int v; 1273 int v;
1029 if (get_user(v,(int __user *)optval)) 1274 if (get_user(v,(int __user *)optval))
1030 return -EFAULT; 1275 return -EFAULT;
1031 net->ipv4.mroute_do_assert = (v) ? 1 : 0; 1276 mrt->mroute_do_assert = (v) ? 1 : 0;
1032 return 0; 1277 return 0;
1033 } 1278 }
1034#ifdef CONFIG_IP_PIMSM 1279#ifdef CONFIG_IP_PIMSM
@@ -1042,14 +1287,35 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
1042 1287
1043 rtnl_lock(); 1288 rtnl_lock();
1044 ret = 0; 1289 ret = 0;
1045 if (v != net->ipv4.mroute_do_pim) { 1290 if (v != mrt->mroute_do_pim) {
1046 net->ipv4.mroute_do_pim = v; 1291 mrt->mroute_do_pim = v;
1047 net->ipv4.mroute_do_assert = v; 1292 mrt->mroute_do_assert = v;
1048 } 1293 }
1049 rtnl_unlock(); 1294 rtnl_unlock();
1050 return ret; 1295 return ret;
1051 } 1296 }
1052#endif 1297#endif
1298#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
1299 case MRT_TABLE:
1300 {
1301 u32 v;
1302
1303 if (optlen != sizeof(u32))
1304 return -EINVAL;
1305 if (get_user(v, (u32 __user *)optval))
1306 return -EFAULT;
1307 if (sk == mrt->mroute_sk)
1308 return -EBUSY;
1309
1310 rtnl_lock();
1311 ret = 0;
1312 if (!ipmr_new_table(net, v))
1313 ret = -ENOMEM;
1314 raw_sk(sk)->ipmr_table = v;
1315 rtnl_unlock();
1316 return ret;
1317 }
1318#endif
1053 /* 1319 /*
1054 * Spurious command, or MRT_VERSION which you cannot 1320 * Spurious command, or MRT_VERSION which you cannot
1055 * set. 1321 * set.
@@ -1068,6 +1334,11 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
1068 int olr; 1334 int olr;
1069 int val; 1335 int val;
1070 struct net *net = sock_net(sk); 1336 struct net *net = sock_net(sk);
1337 struct mr_table *mrt;
1338
1339 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1340 if (mrt == NULL)
1341 return -ENOENT;
1071 1342
1072 if (optname != MRT_VERSION && 1343 if (optname != MRT_VERSION &&
1073#ifdef CONFIG_IP_PIMSM 1344#ifdef CONFIG_IP_PIMSM
@@ -1089,10 +1360,10 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
1089 val = 0x0305; 1360 val = 0x0305;
1090#ifdef CONFIG_IP_PIMSM 1361#ifdef CONFIG_IP_PIMSM
1091 else if (optname == MRT_PIM) 1362 else if (optname == MRT_PIM)
1092 val = net->ipv4.mroute_do_pim; 1363 val = mrt->mroute_do_pim;
1093#endif 1364#endif
1094 else 1365 else
1095 val = net->ipv4.mroute_do_assert; 1366 val = mrt->mroute_do_assert;
1096 if (copy_to_user(optval, &val, olr)) 1367 if (copy_to_user(optval, &val, olr))
1097 return -EFAULT; 1368 return -EFAULT;
1098 return 0; 1369 return 0;
@@ -1109,16 +1380,21 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
1109 struct vif_device *vif; 1380 struct vif_device *vif;
1110 struct mfc_cache *c; 1381 struct mfc_cache *c;
1111 struct net *net = sock_net(sk); 1382 struct net *net = sock_net(sk);
1383 struct mr_table *mrt;
1384
1385 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1386 if (mrt == NULL)
1387 return -ENOENT;
1112 1388
1113 switch (cmd) { 1389 switch (cmd) {
1114 case SIOCGETVIFCNT: 1390 case SIOCGETVIFCNT:
1115 if (copy_from_user(&vr, arg, sizeof(vr))) 1391 if (copy_from_user(&vr, arg, sizeof(vr)))
1116 return -EFAULT; 1392 return -EFAULT;
1117 if (vr.vifi >= net->ipv4.maxvif) 1393 if (vr.vifi >= mrt->maxvif)
1118 return -EINVAL; 1394 return -EINVAL;
1119 read_lock(&mrt_lock); 1395 read_lock(&mrt_lock);
1120 vif = &net->ipv4.vif_table[vr.vifi]; 1396 vif = &mrt->vif_table[vr.vifi];
1121 if (VIF_EXISTS(net, vr.vifi)) { 1397 if (VIF_EXISTS(mrt, vr.vifi)) {
1122 vr.icount = vif->pkt_in; 1398 vr.icount = vif->pkt_in;
1123 vr.ocount = vif->pkt_out; 1399 vr.ocount = vif->pkt_out;
1124 vr.ibytes = vif->bytes_in; 1400 vr.ibytes = vif->bytes_in;
@@ -1136,7 +1412,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
1136 return -EFAULT; 1412 return -EFAULT;
1137 1413
1138 read_lock(&mrt_lock); 1414 read_lock(&mrt_lock);
1139 c = ipmr_cache_find(net, sr.src.s_addr, sr.grp.s_addr); 1415 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
1140 if (c) { 1416 if (c) {
1141 sr.pktcnt = c->mfc_un.res.pkt; 1417 sr.pktcnt = c->mfc_un.res.pkt;
1142 sr.bytecnt = c->mfc_un.res.bytes; 1418 sr.bytecnt = c->mfc_un.res.bytes;
@@ -1159,16 +1435,20 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
1159{ 1435{
1160 struct net_device *dev = ptr; 1436 struct net_device *dev = ptr;
1161 struct net *net = dev_net(dev); 1437 struct net *net = dev_net(dev);
1438 struct mr_table *mrt;
1162 struct vif_device *v; 1439 struct vif_device *v;
1163 int ct; 1440 int ct;
1164 LIST_HEAD(list); 1441 LIST_HEAD(list);
1165 1442
1166 if (event != NETDEV_UNREGISTER) 1443 if (event != NETDEV_UNREGISTER)
1167 return NOTIFY_DONE; 1444 return NOTIFY_DONE;
1168 v = &net->ipv4.vif_table[0]; 1445
1169 for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) { 1446 ipmr_for_each_table(mrt, net) {
1170 if (v->dev == dev) 1447 v = &mrt->vif_table[0];
1171 vif_delete(net, ct, 1, &list); 1448 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1449 if (v->dev == dev)
1450 vif_delete(mrt, ct, 1, &list);
1451 }
1172 } 1452 }
1173 unregister_netdevice_many(&list); 1453 unregister_netdevice_many(&list);
1174 return NOTIFY_DONE; 1454 return NOTIFY_DONE;
@@ -1227,11 +1507,11 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
1227 * Processing handlers for ipmr_forward 1507 * Processing handlers for ipmr_forward
1228 */ 1508 */
1229 1509
1230static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) 1510static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1511 struct sk_buff *skb, struct mfc_cache *c, int vifi)
1231{ 1512{
1232 struct net *net = mfc_net(c);
1233 const struct iphdr *iph = ip_hdr(skb); 1513 const struct iphdr *iph = ip_hdr(skb);
1234 struct vif_device *vif = &net->ipv4.vif_table[vifi]; 1514 struct vif_device *vif = &mrt->vif_table[vifi];
1235 struct net_device *dev; 1515 struct net_device *dev;
1236 struct rtable *rt; 1516 struct rtable *rt;
1237 int encap = 0; 1517 int encap = 0;
@@ -1245,7 +1525,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
1245 vif->bytes_out += skb->len; 1525 vif->bytes_out += skb->len;
1246 vif->dev->stats.tx_bytes += skb->len; 1526 vif->dev->stats.tx_bytes += skb->len;
1247 vif->dev->stats.tx_packets++; 1527 vif->dev->stats.tx_packets++;
1248 ipmr_cache_report(net, skb, vifi, IGMPMSG_WHOLEPKT); 1528 ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
1249 goto out_free; 1529 goto out_free;
1250 } 1530 }
1251#endif 1531#endif
@@ -1328,12 +1608,12 @@ out_free:
1328 return; 1608 return;
1329} 1609}
1330 1610
1331static int ipmr_find_vif(struct net_device *dev) 1611static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
1332{ 1612{
1333 struct net *net = dev_net(dev);
1334 int ct; 1613 int ct;
1335 for (ct = net->ipv4.maxvif-1; ct >= 0; ct--) { 1614
1336 if (net->ipv4.vif_table[ct].dev == dev) 1615 for (ct = mrt->maxvif-1; ct >= 0; ct--) {
1616 if (mrt->vif_table[ct].dev == dev)
1337 break; 1617 break;
1338 } 1618 }
1339 return ct; 1619 return ct;
@@ -1341,11 +1621,12 @@ static int ipmr_find_vif(struct net_device *dev)
1341 1621
1342/* "local" means that we should preserve one skb (for local delivery) */ 1622/* "local" means that we should preserve one skb (for local delivery) */
1343 1623
1344static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local) 1624static int ip_mr_forward(struct net *net, struct mr_table *mrt,
1625 struct sk_buff *skb, struct mfc_cache *cache,
1626 int local)
1345{ 1627{
1346 int psend = -1; 1628 int psend = -1;
1347 int vif, ct; 1629 int vif, ct;
1348 struct net *net = mfc_net(cache);
1349 1630
1350 vif = cache->mfc_parent; 1631 vif = cache->mfc_parent;
1351 cache->mfc_un.res.pkt++; 1632 cache->mfc_un.res.pkt++;
@@ -1354,7 +1635,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
1354 /* 1635 /*
1355 * Wrong interface: drop packet and (maybe) send PIM assert. 1636 * Wrong interface: drop packet and (maybe) send PIM assert.
1356 */ 1637 */
1357 if (net->ipv4.vif_table[vif].dev != skb->dev) { 1638 if (mrt->vif_table[vif].dev != skb->dev) {
1358 int true_vifi; 1639 int true_vifi;
1359 1640
1360 if (skb_rtable(skb)->fl.iif == 0) { 1641 if (skb_rtable(skb)->fl.iif == 0) {
@@ -1373,26 +1654,26 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
1373 } 1654 }
1374 1655
1375 cache->mfc_un.res.wrong_if++; 1656 cache->mfc_un.res.wrong_if++;
1376 true_vifi = ipmr_find_vif(skb->dev); 1657 true_vifi = ipmr_find_vif(mrt, skb->dev);
1377 1658
1378 if (true_vifi >= 0 && net->ipv4.mroute_do_assert && 1659 if (true_vifi >= 0 && mrt->mroute_do_assert &&
1379 /* pimsm uses asserts, when switching from RPT to SPT, 1660 /* pimsm uses asserts, when switching from RPT to SPT,
1380 so that we cannot check that packet arrived on an oif. 1661 so that we cannot check that packet arrived on an oif.
1381 It is bad, but otherwise we would need to move pretty 1662 It is bad, but otherwise we would need to move pretty
1382 large chunk of pimd to kernel. Ough... --ANK 1663 large chunk of pimd to kernel. Ough... --ANK
1383 */ 1664 */
1384 (net->ipv4.mroute_do_pim || 1665 (mrt->mroute_do_pim ||
1385 cache->mfc_un.res.ttls[true_vifi] < 255) && 1666 cache->mfc_un.res.ttls[true_vifi] < 255) &&
1386 time_after(jiffies, 1667 time_after(jiffies,
1387 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { 1668 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
1388 cache->mfc_un.res.last_assert = jiffies; 1669 cache->mfc_un.res.last_assert = jiffies;
1389 ipmr_cache_report(net, skb, true_vifi, IGMPMSG_WRONGVIF); 1670 ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
1390 } 1671 }
1391 goto dont_forward; 1672 goto dont_forward;
1392 } 1673 }
1393 1674
1394 net->ipv4.vif_table[vif].pkt_in++; 1675 mrt->vif_table[vif].pkt_in++;
1395 net->ipv4.vif_table[vif].bytes_in += skb->len; 1676 mrt->vif_table[vif].bytes_in += skb->len;
1396 1677
1397 /* 1678 /*
1398 * Forward the frame 1679 * Forward the frame
@@ -1402,7 +1683,8 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
1402 if (psend != -1) { 1683 if (psend != -1) {
1403 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 1684 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1404 if (skb2) 1685 if (skb2)
1405 ipmr_queue_xmit(skb2, cache, psend); 1686 ipmr_queue_xmit(net, mrt, skb2, cache,
1687 psend);
1406 } 1688 }
1407 psend = ct; 1689 psend = ct;
1408 } 1690 }
@@ -1411,9 +1693,9 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
1411 if (local) { 1693 if (local) {
1412 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 1694 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1413 if (skb2) 1695 if (skb2)
1414 ipmr_queue_xmit(skb2, cache, psend); 1696 ipmr_queue_xmit(net, mrt, skb2, cache, psend);
1415 } else { 1697 } else {
1416 ipmr_queue_xmit(skb, cache, psend); 1698 ipmr_queue_xmit(net, mrt, skb, cache, psend);
1417 return 0; 1699 return 0;
1418 } 1700 }
1419 } 1701 }
@@ -1434,6 +1716,8 @@ int ip_mr_input(struct sk_buff *skb)
1434 struct mfc_cache *cache; 1716 struct mfc_cache *cache;
1435 struct net *net = dev_net(skb->dev); 1717 struct net *net = dev_net(skb->dev);
1436 int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; 1718 int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
1719 struct mr_table *mrt;
1720 int err;
1437 1721
1438 /* Packet is looped back after forward, it should not be 1722 /* Packet is looped back after forward, it should not be
1439 forwarded second time, but still can be delivered locally. 1723 forwarded second time, but still can be delivered locally.
@@ -1441,6 +1725,10 @@ int ip_mr_input(struct sk_buff *skb)
1441 if (IPCB(skb)->flags&IPSKB_FORWARDED) 1725 if (IPCB(skb)->flags&IPSKB_FORWARDED)
1442 goto dont_forward; 1726 goto dont_forward;
1443 1727
1728 err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
1729 if (err < 0)
1730 return err;
1731
1444 if (!local) { 1732 if (!local) {
1445 if (IPCB(skb)->opt.router_alert) { 1733 if (IPCB(skb)->opt.router_alert) {
1446 if (ip_call_ra_chain(skb)) 1734 if (ip_call_ra_chain(skb))
@@ -1453,9 +1741,9 @@ int ip_mr_input(struct sk_buff *skb)
1453 that we can forward NO IGMP messages. 1741 that we can forward NO IGMP messages.
1454 */ 1742 */
1455 read_lock(&mrt_lock); 1743 read_lock(&mrt_lock);
1456 if (net->ipv4.mroute_sk) { 1744 if (mrt->mroute_sk) {
1457 nf_reset(skb); 1745 nf_reset(skb);
1458 raw_rcv(net->ipv4.mroute_sk, skb); 1746 raw_rcv(mrt->mroute_sk, skb);
1459 read_unlock(&mrt_lock); 1747 read_unlock(&mrt_lock);
1460 return 0; 1748 return 0;
1461 } 1749 }
@@ -1464,7 +1752,7 @@ int ip_mr_input(struct sk_buff *skb)
1464 } 1752 }
1465 1753
1466 read_lock(&mrt_lock); 1754 read_lock(&mrt_lock);
1467 cache = ipmr_cache_find(net, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); 1755 cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
1468 1756
1469 /* 1757 /*
1470 * No usable cache entry 1758 * No usable cache entry
@@ -1482,9 +1770,9 @@ int ip_mr_input(struct sk_buff *skb)
1482 skb = skb2; 1770 skb = skb2;
1483 } 1771 }
1484 1772
1485 vif = ipmr_find_vif(skb->dev); 1773 vif = ipmr_find_vif(mrt, skb->dev);
1486 if (vif >= 0) { 1774 if (vif >= 0) {
1487 int err = ipmr_cache_unresolved(net, vif, skb); 1775 int err = ipmr_cache_unresolved(mrt, vif, skb);
1488 read_unlock(&mrt_lock); 1776 read_unlock(&mrt_lock);
1489 1777
1490 return err; 1778 return err;
@@ -1494,7 +1782,7 @@ int ip_mr_input(struct sk_buff *skb)
1494 return -ENODEV; 1782 return -ENODEV;
1495 } 1783 }
1496 1784
1497 ip_mr_forward(skb, cache, local); 1785 ip_mr_forward(net, mrt, skb, cache, local);
1498 1786
1499 read_unlock(&mrt_lock); 1787 read_unlock(&mrt_lock);
1500 1788
@@ -1511,11 +1799,11 @@ dont_forward:
1511} 1799}
1512 1800
1513#ifdef CONFIG_IP_PIMSM 1801#ifdef CONFIG_IP_PIMSM
1514static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen) 1802static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
1803 unsigned int pimlen)
1515{ 1804{
1516 struct net_device *reg_dev = NULL; 1805 struct net_device *reg_dev = NULL;
1517 struct iphdr *encap; 1806 struct iphdr *encap;
1518 struct net *net = dev_net(skb->dev);
1519 1807
1520 encap = (struct iphdr *)(skb_transport_header(skb) + pimlen); 1808 encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
1521 /* 1809 /*
@@ -1530,8 +1818,8 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
1530 return 1; 1818 return 1;
1531 1819
1532 read_lock(&mrt_lock); 1820 read_lock(&mrt_lock);
1533 if (net->ipv4.mroute_reg_vif_num >= 0) 1821 if (mrt->mroute_reg_vif_num >= 0)
1534 reg_dev = net->ipv4.vif_table[net->ipv4.mroute_reg_vif_num].dev; 1822 reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
1535 if (reg_dev) 1823 if (reg_dev)
1536 dev_hold(reg_dev); 1824 dev_hold(reg_dev);
1537 read_unlock(&mrt_lock); 1825 read_unlock(&mrt_lock);
@@ -1566,17 +1854,21 @@ int pim_rcv_v1(struct sk_buff * skb)
1566{ 1854{
1567 struct igmphdr *pim; 1855 struct igmphdr *pim;
1568 struct net *net = dev_net(skb->dev); 1856 struct net *net = dev_net(skb->dev);
1857 struct mr_table *mrt;
1569 1858
1570 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) 1859 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
1571 goto drop; 1860 goto drop;
1572 1861
1573 pim = igmp_hdr(skb); 1862 pim = igmp_hdr(skb);
1574 1863
1575 if (!net->ipv4.mroute_do_pim || 1864 if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
1865 goto drop;
1866
1867 if (!mrt->mroute_do_pim ||
1576 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) 1868 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
1577 goto drop; 1869 goto drop;
1578 1870
1579 if (__pim_rcv(skb, sizeof(*pim))) { 1871 if (__pim_rcv(mrt, skb, sizeof(*pim))) {
1580drop: 1872drop:
1581 kfree_skb(skb); 1873 kfree_skb(skb);
1582 } 1874 }
@@ -1588,6 +1880,8 @@ drop:
1588static int pim_rcv(struct sk_buff * skb) 1880static int pim_rcv(struct sk_buff * skb)
1589{ 1881{
1590 struct pimreghdr *pim; 1882 struct pimreghdr *pim;
1883 struct net *net = dev_net(skb->dev);
1884 struct mr_table *mrt;
1591 1885
1592 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) 1886 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
1593 goto drop; 1887 goto drop;
@@ -1599,7 +1893,10 @@ static int pim_rcv(struct sk_buff * skb)
1599 csum_fold(skb_checksum(skb, 0, skb->len, 0)))) 1893 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
1600 goto drop; 1894 goto drop;
1601 1895
1602 if (__pim_rcv(skb, sizeof(*pim))) { 1896 if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
1897 goto drop;
1898
1899 if (__pim_rcv(mrt, skb, sizeof(*pim))) {
1603drop: 1900drop:
1604 kfree_skb(skb); 1901 kfree_skb(skb);
1605 } 1902 }
@@ -1608,28 +1905,31 @@ drop:
1608#endif 1905#endif
1609 1906
1610static int 1907static int
1611ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) 1908ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c,
1909 struct rtmsg *rtm)
1612{ 1910{
1613 int ct; 1911 int ct;
1614 struct rtnexthop *nhp; 1912 struct rtnexthop *nhp;
1615 struct net *net = mfc_net(c);
1616 struct net_device *dev = net->ipv4.vif_table[c->mfc_parent].dev;
1617 u8 *b = skb_tail_pointer(skb); 1913 u8 *b = skb_tail_pointer(skb);
1618 struct rtattr *mp_head; 1914 struct rtattr *mp_head;
1619 1915
1620 if (dev) 1916 /* If cache is unresolved, don't try to parse IIF and OIF */
1621 RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex); 1917 if (c->mfc_parent > MAXVIFS)
1918 return -ENOENT;
1919
1920 if (VIF_EXISTS(mrt, c->mfc_parent))
1921 RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex);
1622 1922
1623 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); 1923 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
1624 1924
1625 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { 1925 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
1626 if (c->mfc_un.res.ttls[ct] < 255) { 1926 if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
1627 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) 1927 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1628 goto rtattr_failure; 1928 goto rtattr_failure;
1629 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); 1929 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
1630 nhp->rtnh_flags = 0; 1930 nhp->rtnh_flags = 0;
1631 nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; 1931 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
1632 nhp->rtnh_ifindex = net->ipv4.vif_table[ct].dev->ifindex; 1932 nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
1633 nhp->rtnh_len = sizeof(*nhp); 1933 nhp->rtnh_len = sizeof(*nhp);
1634 } 1934 }
1635 } 1935 }
@@ -1647,11 +1947,16 @@ int ipmr_get_route(struct net *net,
1647 struct sk_buff *skb, struct rtmsg *rtm, int nowait) 1947 struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1648{ 1948{
1649 int err; 1949 int err;
1950 struct mr_table *mrt;
1650 struct mfc_cache *cache; 1951 struct mfc_cache *cache;
1651 struct rtable *rt = skb_rtable(skb); 1952 struct rtable *rt = skb_rtable(skb);
1652 1953
1954 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
1955 if (mrt == NULL)
1956 return -ENOENT;
1957
1653 read_lock(&mrt_lock); 1958 read_lock(&mrt_lock);
1654 cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst); 1959 cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst);
1655 1960
1656 if (cache == NULL) { 1961 if (cache == NULL) {
1657 struct sk_buff *skb2; 1962 struct sk_buff *skb2;
@@ -1665,7 +1970,7 @@ int ipmr_get_route(struct net *net,
1665 } 1970 }
1666 1971
1667 dev = skb->dev; 1972 dev = skb->dev;
1668 if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) { 1973 if (dev == NULL || (vif = ipmr_find_vif(mrt, dev)) < 0) {
1669 read_unlock(&mrt_lock); 1974 read_unlock(&mrt_lock);
1670 return -ENODEV; 1975 return -ENODEV;
1671 } 1976 }
@@ -1682,14 +1987,14 @@ int ipmr_get_route(struct net *net,
1682 iph->saddr = rt->rt_src; 1987 iph->saddr = rt->rt_src;
1683 iph->daddr = rt->rt_dst; 1988 iph->daddr = rt->rt_dst;
1684 iph->version = 0; 1989 iph->version = 0;
1685 err = ipmr_cache_unresolved(net, vif, skb2); 1990 err = ipmr_cache_unresolved(mrt, vif, skb2);
1686 read_unlock(&mrt_lock); 1991 read_unlock(&mrt_lock);
1687 return err; 1992 return err;
1688 } 1993 }
1689 1994
1690 if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) 1995 if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
1691 cache->mfc_flags |= MFC_NOTIFY; 1996 cache->mfc_flags |= MFC_NOTIFY;
1692 err = ipmr_fill_mroute(skb, cache, rtm); 1997 err = ipmr_fill_mroute(mrt, skb, cache, rtm);
1693 read_unlock(&mrt_lock); 1998 read_unlock(&mrt_lock);
1694 return err; 1999 return err;
1695} 2000}
@@ -1700,6 +2005,7 @@ int ipmr_get_route(struct net *net,
1700 */ 2005 */
1701struct ipmr_vif_iter { 2006struct ipmr_vif_iter {
1702 struct seq_net_private p; 2007 struct seq_net_private p;
2008 struct mr_table *mrt;
1703 int ct; 2009 int ct;
1704}; 2010};
1705 2011
@@ -1707,11 +2013,13 @@ static struct vif_device *ipmr_vif_seq_idx(struct net *net,
1707 struct ipmr_vif_iter *iter, 2013 struct ipmr_vif_iter *iter,
1708 loff_t pos) 2014 loff_t pos)
1709{ 2015{
1710 for (iter->ct = 0; iter->ct < net->ipv4.maxvif; ++iter->ct) { 2016 struct mr_table *mrt = iter->mrt;
1711 if (!VIF_EXISTS(net, iter->ct)) 2017
2018 for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
2019 if (!VIF_EXISTS(mrt, iter->ct))
1712 continue; 2020 continue;
1713 if (pos-- == 0) 2021 if (pos-- == 0)
1714 return &net->ipv4.vif_table[iter->ct]; 2022 return &mrt->vif_table[iter->ct];
1715 } 2023 }
1716 return NULL; 2024 return NULL;
1717} 2025}
@@ -1719,7 +2027,15 @@ static struct vif_device *ipmr_vif_seq_idx(struct net *net,
1719static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) 2027static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
1720 __acquires(mrt_lock) 2028 __acquires(mrt_lock)
1721{ 2029{
2030 struct ipmr_vif_iter *iter = seq->private;
1722 struct net *net = seq_file_net(seq); 2031 struct net *net = seq_file_net(seq);
2032 struct mr_table *mrt;
2033
2034 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2035 if (mrt == NULL)
2036 return ERR_PTR(-ENOENT);
2037
2038 iter->mrt = mrt;
1723 2039
1724 read_lock(&mrt_lock); 2040 read_lock(&mrt_lock);
1725 return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1) 2041 return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
@@ -1730,15 +2046,16 @@ static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1730{ 2046{
1731 struct ipmr_vif_iter *iter = seq->private; 2047 struct ipmr_vif_iter *iter = seq->private;
1732 struct net *net = seq_file_net(seq); 2048 struct net *net = seq_file_net(seq);
2049 struct mr_table *mrt = iter->mrt;
1733 2050
1734 ++*pos; 2051 ++*pos;
1735 if (v == SEQ_START_TOKEN) 2052 if (v == SEQ_START_TOKEN)
1736 return ipmr_vif_seq_idx(net, iter, 0); 2053 return ipmr_vif_seq_idx(net, iter, 0);
1737 2054
1738 while (++iter->ct < net->ipv4.maxvif) { 2055 while (++iter->ct < mrt->maxvif) {
1739 if (!VIF_EXISTS(net, iter->ct)) 2056 if (!VIF_EXISTS(mrt, iter->ct))
1740 continue; 2057 continue;
1741 return &net->ipv4.vif_table[iter->ct]; 2058 return &mrt->vif_table[iter->ct];
1742 } 2059 }
1743 return NULL; 2060 return NULL;
1744} 2061}
@@ -1751,7 +2068,8 @@ static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
1751 2068
1752static int ipmr_vif_seq_show(struct seq_file *seq, void *v) 2069static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
1753{ 2070{
1754 struct net *net = seq_file_net(seq); 2071 struct ipmr_vif_iter *iter = seq->private;
2072 struct mr_table *mrt = iter->mrt;
1755 2073
1756 if (v == SEQ_START_TOKEN) { 2074 if (v == SEQ_START_TOKEN) {
1757 seq_puts(seq, 2075 seq_puts(seq,
@@ -1762,7 +2080,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
1762 2080
1763 seq_printf(seq, 2081 seq_printf(seq,
1764 "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", 2082 "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
1765 vif - net->ipv4.vif_table, 2083 vif - mrt->vif_table,
1766 name, vif->bytes_in, vif->pkt_in, 2084 name, vif->bytes_in, vif->pkt_in,
1767 vif->bytes_out, vif->pkt_out, 2085 vif->bytes_out, vif->pkt_out,
1768 vif->flags, vif->local, vif->remote); 2086 vif->flags, vif->local, vif->remote);
@@ -1793,7 +2111,8 @@ static const struct file_operations ipmr_vif_fops = {
1793 2111
1794struct ipmr_mfc_iter { 2112struct ipmr_mfc_iter {
1795 struct seq_net_private p; 2113 struct seq_net_private p;
1796 struct mfc_cache **cache; 2114 struct mr_table *mrt;
2115 struct list_head *cache;
1797 int ct; 2116 int ct;
1798}; 2117};
1799 2118
@@ -1801,22 +2120,22 @@ struct ipmr_mfc_iter {
1801static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net, 2120static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
1802 struct ipmr_mfc_iter *it, loff_t pos) 2121 struct ipmr_mfc_iter *it, loff_t pos)
1803{ 2122{
2123 struct mr_table *mrt = it->mrt;
1804 struct mfc_cache *mfc; 2124 struct mfc_cache *mfc;
1805 2125
1806 it->cache = net->ipv4.mfc_cache_array;
1807 read_lock(&mrt_lock); 2126 read_lock(&mrt_lock);
1808 for (it->ct = 0; it->ct < MFC_LINES; it->ct++) 2127 for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
1809 for (mfc = net->ipv4.mfc_cache_array[it->ct]; 2128 it->cache = &mrt->mfc_cache_array[it->ct];
1810 mfc; mfc = mfc->next) 2129 list_for_each_entry(mfc, it->cache, list)
1811 if (pos-- == 0) 2130 if (pos-- == 0)
1812 return mfc; 2131 return mfc;
2132 }
1813 read_unlock(&mrt_lock); 2133 read_unlock(&mrt_lock);
1814 2134
1815 it->cache = &mfc_unres_queue;
1816 spin_lock_bh(&mfc_unres_lock); 2135 spin_lock_bh(&mfc_unres_lock);
1817 for (mfc = mfc_unres_queue; mfc; mfc = mfc->next) 2136 it->cache = &mrt->mfc_unres_queue;
1818 if (net_eq(mfc_net(mfc), net) && 2137 list_for_each_entry(mfc, it->cache, list)
1819 pos-- == 0) 2138 if (pos-- == 0)
1820 return mfc; 2139 return mfc;
1821 spin_unlock_bh(&mfc_unres_lock); 2140 spin_unlock_bh(&mfc_unres_lock);
1822 2141
@@ -1829,7 +2148,13 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
1829{ 2148{
1830 struct ipmr_mfc_iter *it = seq->private; 2149 struct ipmr_mfc_iter *it = seq->private;
1831 struct net *net = seq_file_net(seq); 2150 struct net *net = seq_file_net(seq);
2151 struct mr_table *mrt;
2152
2153 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2154 if (mrt == NULL)
2155 return ERR_PTR(-ENOENT);
1832 2156
2157 it->mrt = mrt;
1833 it->cache = NULL; 2158 it->cache = NULL;
1834 it->ct = 0; 2159 it->ct = 0;
1835 return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1) 2160 return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
@@ -1841,37 +2166,36 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1841 struct mfc_cache *mfc = v; 2166 struct mfc_cache *mfc = v;
1842 struct ipmr_mfc_iter *it = seq->private; 2167 struct ipmr_mfc_iter *it = seq->private;
1843 struct net *net = seq_file_net(seq); 2168 struct net *net = seq_file_net(seq);
2169 struct mr_table *mrt = it->mrt;
1844 2170
1845 ++*pos; 2171 ++*pos;
1846 2172
1847 if (v == SEQ_START_TOKEN) 2173 if (v == SEQ_START_TOKEN)
1848 return ipmr_mfc_seq_idx(net, seq->private, 0); 2174 return ipmr_mfc_seq_idx(net, seq->private, 0);
1849 2175
1850 if (mfc->next) 2176 if (mfc->list.next != it->cache)
1851 return mfc->next; 2177 return list_entry(mfc->list.next, struct mfc_cache, list);
1852 2178
1853 if (it->cache == &mfc_unres_queue) 2179 if (it->cache == &mrt->mfc_unres_queue)
1854 goto end_of_list; 2180 goto end_of_list;
1855 2181
1856 BUG_ON(it->cache != net->ipv4.mfc_cache_array); 2182 BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);
1857 2183
1858 while (++it->ct < MFC_LINES) { 2184 while (++it->ct < MFC_LINES) {
1859 mfc = net->ipv4.mfc_cache_array[it->ct]; 2185 it->cache = &mrt->mfc_cache_array[it->ct];
1860 if (mfc) 2186 if (list_empty(it->cache))
1861 return mfc; 2187 continue;
2188 return list_first_entry(it->cache, struct mfc_cache, list);
1862 } 2189 }
1863 2190
1864 /* exhausted cache_array, show unresolved */ 2191 /* exhausted cache_array, show unresolved */
1865 read_unlock(&mrt_lock); 2192 read_unlock(&mrt_lock);
1866 it->cache = &mfc_unres_queue; 2193 it->cache = &mrt->mfc_unres_queue;
1867 it->ct = 0; 2194 it->ct = 0;
1868 2195
1869 spin_lock_bh(&mfc_unres_lock); 2196 spin_lock_bh(&mfc_unres_lock);
1870 mfc = mfc_unres_queue; 2197 if (!list_empty(it->cache))
1871 while (mfc && !net_eq(mfc_net(mfc), net)) 2198 return list_first_entry(it->cache, struct mfc_cache, list);
1872 mfc = mfc->next;
1873 if (mfc)
1874 return mfc;
1875 2199
1876 end_of_list: 2200 end_of_list:
1877 spin_unlock_bh(&mfc_unres_lock); 2201 spin_unlock_bh(&mfc_unres_lock);
@@ -1883,18 +2207,17 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1883static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) 2207static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
1884{ 2208{
1885 struct ipmr_mfc_iter *it = seq->private; 2209 struct ipmr_mfc_iter *it = seq->private;
1886 struct net *net = seq_file_net(seq); 2210 struct mr_table *mrt = it->mrt;
1887 2211
1888 if (it->cache == &mfc_unres_queue) 2212 if (it->cache == &mrt->mfc_unres_queue)
1889 spin_unlock_bh(&mfc_unres_lock); 2213 spin_unlock_bh(&mfc_unres_lock);
1890 else if (it->cache == net->ipv4.mfc_cache_array) 2214 else if (it->cache == &mrt->mfc_cache_array[it->ct])
1891 read_unlock(&mrt_lock); 2215 read_unlock(&mrt_lock);
1892} 2216}
1893 2217
1894static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) 2218static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
1895{ 2219{
1896 int n; 2220 int n;
1897 struct net *net = seq_file_net(seq);
1898 2221
1899 if (v == SEQ_START_TOKEN) { 2222 if (v == SEQ_START_TOKEN) {
1900 seq_puts(seq, 2223 seq_puts(seq,
@@ -1902,20 +2225,21 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
1902 } else { 2225 } else {
1903 const struct mfc_cache *mfc = v; 2226 const struct mfc_cache *mfc = v;
1904 const struct ipmr_mfc_iter *it = seq->private; 2227 const struct ipmr_mfc_iter *it = seq->private;
2228 const struct mr_table *mrt = it->mrt;
1905 2229
1906 seq_printf(seq, "%08lX %08lX %-3hd", 2230 seq_printf(seq, "%08lX %08lX %-3hd",
1907 (unsigned long) mfc->mfc_mcastgrp, 2231 (unsigned long) mfc->mfc_mcastgrp,
1908 (unsigned long) mfc->mfc_origin, 2232 (unsigned long) mfc->mfc_origin,
1909 mfc->mfc_parent); 2233 mfc->mfc_parent);
1910 2234
1911 if (it->cache != &mfc_unres_queue) { 2235 if (it->cache != &mrt->mfc_unres_queue) {
1912 seq_printf(seq, " %8lu %8lu %8lu", 2236 seq_printf(seq, " %8lu %8lu %8lu",
1913 mfc->mfc_un.res.pkt, 2237 mfc->mfc_un.res.pkt,
1914 mfc->mfc_un.res.bytes, 2238 mfc->mfc_un.res.bytes,
1915 mfc->mfc_un.res.wrong_if); 2239 mfc->mfc_un.res.wrong_if);
1916 for (n = mfc->mfc_un.res.minvif; 2240 for (n = mfc->mfc_un.res.minvif;
1917 n < mfc->mfc_un.res.maxvif; n++ ) { 2241 n < mfc->mfc_un.res.maxvif; n++ ) {
1918 if (VIF_EXISTS(net, n) && 2242 if (VIF_EXISTS(mrt, n) &&
1919 mfc->mfc_un.res.ttls[n] < 255) 2243 mfc->mfc_un.res.ttls[n] < 255)
1920 seq_printf(seq, 2244 seq_printf(seq,
1921 " %2d:%-3d", 2245 " %2d:%-3d",
@@ -1967,27 +2291,11 @@ static const struct net_protocol pim_protocol = {
1967 */ 2291 */
1968static int __net_init ipmr_net_init(struct net *net) 2292static int __net_init ipmr_net_init(struct net *net)
1969{ 2293{
1970 int err = 0; 2294 int err;
1971 2295
1972 net->ipv4.vif_table = kcalloc(MAXVIFS, sizeof(struct vif_device), 2296 err = ipmr_rules_init(net);
1973 GFP_KERNEL); 2297 if (err < 0)
1974 if (!net->ipv4.vif_table) {
1975 err = -ENOMEM;
1976 goto fail; 2298 goto fail;
1977 }
1978
1979 /* Forwarding cache */
1980 net->ipv4.mfc_cache_array = kcalloc(MFC_LINES,
1981 sizeof(struct mfc_cache *),
1982 GFP_KERNEL);
1983 if (!net->ipv4.mfc_cache_array) {
1984 err = -ENOMEM;
1985 goto fail_mfc_cache;
1986 }
1987
1988#ifdef CONFIG_IP_PIMSM
1989 net->ipv4.mroute_reg_vif_num = -1;
1990#endif
1991 2299
1992#ifdef CONFIG_PROC_FS 2300#ifdef CONFIG_PROC_FS
1993 err = -ENOMEM; 2301 err = -ENOMEM;
@@ -2002,10 +2310,8 @@ static int __net_init ipmr_net_init(struct net *net)
2002proc_cache_fail: 2310proc_cache_fail:
2003 proc_net_remove(net, "ip_mr_vif"); 2311 proc_net_remove(net, "ip_mr_vif");
2004proc_vif_fail: 2312proc_vif_fail:
2005 kfree(net->ipv4.mfc_cache_array); 2313 ipmr_rules_exit(net);
2006#endif 2314#endif
2007fail_mfc_cache:
2008 kfree(net->ipv4.vif_table);
2009fail: 2315fail:
2010 return err; 2316 return err;
2011} 2317}
@@ -2016,8 +2322,7 @@ static void __net_exit ipmr_net_exit(struct net *net)
2016 proc_net_remove(net, "ip_mr_cache"); 2322 proc_net_remove(net, "ip_mr_cache");
2017 proc_net_remove(net, "ip_mr_vif"); 2323 proc_net_remove(net, "ip_mr_vif");
2018#endif 2324#endif
2019 kfree(net->ipv4.mfc_cache_array); 2325 ipmr_rules_exit(net);
2020 kfree(net->ipv4.vif_table);
2021} 2326}
2022 2327
2023static struct pernet_operations ipmr_net_ops = { 2328static struct pernet_operations ipmr_net_ops = {
@@ -2040,7 +2345,6 @@ int __init ip_mr_init(void)
2040 if (err) 2345 if (err)
2041 goto reg_pernet_fail; 2346 goto reg_pernet_fail;
2042 2347
2043 setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
2044 err = register_netdevice_notifier(&ip_mr_notifier); 2348 err = register_netdevice_notifier(&ip_mr_notifier);
2045 if (err) 2349 if (err)
2046 goto reg_notif_fail; 2350 goto reg_notif_fail;
@@ -2058,7 +2362,6 @@ add_proto_fail:
2058 unregister_netdevice_notifier(&ip_mr_notifier); 2362 unregister_netdevice_notifier(&ip_mr_notifier);
2059#endif 2363#endif
2060reg_notif_fail: 2364reg_notif_fail:
2061 del_timer(&ipmr_expire_timer);
2062 unregister_pernet_subsys(&ipmr_net_ops); 2365 unregister_pernet_subsys(&ipmr_net_ops);
2063reg_pernet_fail: 2366reg_pernet_fail:
2064 kmem_cache_destroy(mrt_cachep); 2367 kmem_cache_destroy(mrt_cachep);
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index c14623fc4d5e..82fb43c5c59e 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -4,6 +4,7 @@
4#include <linux/netfilter_ipv4.h> 4#include <linux/netfilter_ipv4.h>
5#include <linux/ip.h> 5#include <linux/ip.h>
6#include <linux/skbuff.h> 6#include <linux/skbuff.h>
7#include <linux/gfp.h>
7#include <net/route.h> 8#include <net/route.h>
8#include <net/xfrm.h> 9#include <net/xfrm.h>
9#include <net/ip.h> 10#include <net/ip.h>
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index bfe26f32b930..79ca5e70d497 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -8,6 +8,7 @@
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/netfilter/x_tables.h> 9#include <linux/netfilter/x_tables.h>
10#include <linux/netfilter_arp/arp_tables.h> 10#include <linux/netfilter_arp/arp_tables.h>
11#include <linux/slab.h>
11 12
12MODULE_LICENSE("GPL"); 13MODULE_LICENSE("GPL");
13MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); 14MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index d781513282d4..c838238104f5 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -26,6 +26,7 @@
26#include <linux/security.h> 26#include <linux/security.h>
27#include <linux/net.h> 27#include <linux/net.h>
28#include <linux/mutex.h> 28#include <linux/mutex.h>
29#include <linux/slab.h>
29#include <net/net_namespace.h> 30#include <net/net_namespace.h>
30#include <net/sock.h> 31#include <net/sock.h>
31#include <net/route.h> 32#include <net/route.h>
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index c6be74e57264..8815d458de46 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -15,6 +15,7 @@
15#include <linux/jhash.h> 15#include <linux/jhash.h>
16#include <linux/bitops.h> 16#include <linux/bitops.h>
17#include <linux/skbuff.h> 17#include <linux/skbuff.h>
18#include <linux/slab.h>
18#include <linux/ip.h> 19#include <linux/ip.h>
19#include <linux/tcp.h> 20#include <linux/tcp.h>
20#include <linux/udp.h> 21#include <linux/udp.h>
@@ -88,7 +89,7 @@ clusterip_config_entry_put(struct clusterip_config *c)
88 list_del(&c->list); 89 list_del(&c->list);
89 write_unlock_bh(&clusterip_lock); 90 write_unlock_bh(&clusterip_lock);
90 91
91 dev_mc_delete(c->dev, c->clustermac, ETH_ALEN, 0); 92 dev_mc_del(c->dev, c->clustermac);
92 dev_put(c->dev); 93 dev_put(c->dev);
93 94
94 /* In case anyone still accesses the file, the open/close 95 /* In case anyone still accesses the file, the open/close
@@ -397,7 +398,7 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
397 dev_put(dev); 398 dev_put(dev);
398 return -ENOMEM; 399 return -ENOMEM;
399 } 400 }
400 dev_mc_add(config->dev,config->clustermac, ETH_ALEN, 0); 401 dev_mc_add(config->dev, config->clustermac);
401 } 402 }
402 } 403 }
403 cipinfo->config = config; 404 cipinfo->config = config;
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 038fa0bb8f6b..a86135a28058 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -12,6 +12,7 @@
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/slab.h>
15#include <linux/ip.h> 16#include <linux/ip.h>
16#include <linux/udp.h> 17#include <linux/udp.h>
17#include <linux/icmp.h> 18#include <linux/icmp.h>
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 04c86dc5d538..8f60749e87a3 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -33,6 +33,7 @@
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/spinlock.h> 34#include <linux/spinlock.h>
35#include <linux/socket.h> 35#include <linux/socket.h>
36#include <linux/slab.h>
36#include <linux/skbuff.h> 37#include <linux/skbuff.h>
37#include <linux/kernel.h> 38#include <linux/kernel.h>
38#include <linux/timer.h> 39#include <linux/timer.h>
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index c8dc9800d620..55392466daa4 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -13,6 +13,7 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
15#include <linux/netfilter_ipv4/ip_tables.h> 15#include <linux/netfilter_ipv4/ip_tables.h>
16#include <linux/slab.h>
16#include <net/ip.h> 17#include <net/ip.h>
17 18
18MODULE_LICENSE("GPL"); 19MODULE_LICENSE("GPL");
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index b9b83464cbf4..294a2a32f293 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -12,6 +12,7 @@
12#include <linux/netfilter_ipv4/ip_tables.h> 12#include <linux/netfilter_ipv4/ip_tables.h>
13#include <linux/netdevice.h> 13#include <linux/netdevice.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/slab.h>
15#include <net/sock.h> 16#include <net/sock.h>
16#include <net/route.h> 17#include <net/route.h>
17#include <linux/ip.h> 18#include <linux/ip.h>
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 06fb9d11953c..07fb710cd722 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -5,6 +5,7 @@
5 */ 5 */
6#include <linux/module.h> 6#include <linux/module.h>
7#include <linux/netfilter_ipv4/ip_tables.h> 7#include <linux/netfilter_ipv4/ip_tables.h>
8#include <linux/slab.h>
8#include <net/ip.h> 9#include <net/ip.h>
9 10
10#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) 11#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index cce2f64e6f21..be45bdc4c602 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -17,6 +17,7 @@
17 */ 17 */
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/netfilter_ipv4/ip_tables.h> 19#include <linux/netfilter_ipv4/ip_tables.h>
20#include <linux/slab.h>
20#include <net/ip.h> 21#include <net/ip.h>
21 22
22MODULE_LICENSE("GPL"); 23MODULE_LICENSE("GPL");
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 4595281c2863..4f8bddb760c9 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -12,6 +12,7 @@
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/timer.h> 13#include <linux/timer.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/gfp.h>
15#include <net/checksum.h> 16#include <net/checksum.h>
16#include <net/icmp.h> 17#include <net/icmp.h>
17#include <net/ip.h> 18#include <net/ip.h>
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 4b6af4bb1f50..4a0c6b548eee 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -8,6 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/gfp.h>
11#include <linux/kmod.h> 12#include <linux/kmod.h>
12#include <linux/types.h> 13#include <linux/types.h>
13#include <linux/timer.h> 14#include <linux/timer.h>
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index b66137c80bc7..b48a0fc3d9ed 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -16,6 +16,7 @@
16#include <linux/kmod.h> 16#include <linux/kmod.h>
17#include <linux/skbuff.h> 17#include <linux/skbuff.h>
18#include <linux/proc_fs.h> 18#include <linux/proc_fs.h>
19#include <linux/slab.h>
19#include <net/checksum.h> 20#include <net/checksum.h>
20#include <net/route.h> 21#include <net/route.h>
21#include <linux/bitops.h> 22#include <linux/bitops.h>
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 0b9c7ce3d6c5..4d85b6e55f29 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -43,6 +43,7 @@
43#include <linux/moduleparam.h> 43#include <linux/moduleparam.h>
44#include <linux/types.h> 44#include <linux/types.h>
45#include <linux/kernel.h> 45#include <linux/kernel.h>
46#include <linux/slab.h>
46#include <linux/in.h> 47#include <linux/in.h>
47#include <linux/ip.h> 48#include <linux/ip.h>
48#include <linux/udp.h> 49#include <linux/udp.h>
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index 0b49248e34fa..84c7974f5830 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -7,6 +7,7 @@
7 */ 7 */
8#include <linux/types.h> 8#include <linux/types.h>
9#include <linux/icmp.h> 9#include <linux/icmp.h>
10#include <linux/gfp.h>
10#include <linux/ip.h> 11#include <linux/ip.h>
11#include <linux/netfilter.h> 12#include <linux/netfilter.h>
12#include <linux/netfilter_ipv4.h> 13#include <linux/netfilter_ipv4.h>
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 4f1f337f4337..3dc9914c1dce 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -251,6 +251,7 @@ static const struct snmp_mib snmp4_net_list[] = {
251 SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK), 251 SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK),
252 SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP), 252 SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP),
253 SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), 253 SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
254 SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
254 SNMP_MIB_SENTINEL 255 SNMP_MIB_SENTINEL
255}; 256};
256 257
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 34d9adb83590..bbda0d5f9244 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -60,7 +60,6 @@
60#include <net/net_namespace.h> 60#include <net/net_namespace.h>
61#include <net/dst.h> 61#include <net/dst.h>
62#include <net/sock.h> 62#include <net/sock.h>
63#include <linux/gfp.h>
64#include <linux/ip.h> 63#include <linux/ip.h>
65#include <linux/net.h> 64#include <linux/net.h>
66#include <net/ip.h> 65#include <net/ip.h>
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a770df2493d2..cb562fdd9b9a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -90,6 +90,7 @@
90#include <linux/jhash.h> 90#include <linux/jhash.h>
91#include <linux/rcupdate.h> 91#include <linux/rcupdate.h>
92#include <linux/times.h> 92#include <linux/times.h>
93#include <linux/slab.h>
93#include <net/dst.h> 94#include <net/dst.h>
94#include <net/net_namespace.h> 95#include <net/net_namespace.h>
95#include <net/protocol.h> 96#include <net/protocol.h>
@@ -1097,7 +1098,7 @@ static int slow_chain_length(const struct rtable *head)
1097} 1098}
1098 1099
1099static int rt_intern_hash(unsigned hash, struct rtable *rt, 1100static int rt_intern_hash(unsigned hash, struct rtable *rt,
1100 struct rtable **rp, struct sk_buff *skb) 1101 struct rtable **rp, struct sk_buff *skb, int ifindex)
1101{ 1102{
1102 struct rtable *rth, **rthp; 1103 struct rtable *rth, **rthp;
1103 unsigned long now; 1104 unsigned long now;
@@ -1212,11 +1213,16 @@ restart:
1212 slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) { 1213 slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
1213 struct net *net = dev_net(rt->u.dst.dev); 1214 struct net *net = dev_net(rt->u.dst.dev);
1214 int num = ++net->ipv4.current_rt_cache_rebuild_count; 1215 int num = ++net->ipv4.current_rt_cache_rebuild_count;
1215 if (!rt_caching(dev_net(rt->u.dst.dev))) { 1216 if (!rt_caching(net)) {
1216 printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n", 1217 printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
1217 rt->u.dst.dev->name, num); 1218 rt->u.dst.dev->name, num);
1218 } 1219 }
1219 rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev)); 1220 rt_emergency_hash_rebuild(net);
1221 spin_unlock_bh(rt_hash_lock_addr(hash));
1222
1223 hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
1224 ifindex, rt_genid(net));
1225 goto restart;
1220 } 1226 }
1221 } 1227 }
1222 1228
@@ -1441,7 +1447,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1441 dev_hold(rt->u.dst.dev); 1447 dev_hold(rt->u.dst.dev);
1442 if (rt->idev) 1448 if (rt->idev)
1443 in_dev_hold(rt->idev); 1449 in_dev_hold(rt->idev);
1444 rt->u.dst.obsolete = 0; 1450 rt->u.dst.obsolete = -1;
1445 rt->u.dst.lastuse = jiffies; 1451 rt->u.dst.lastuse = jiffies;
1446 rt->u.dst.path = &rt->u.dst; 1452 rt->u.dst.path = &rt->u.dst;
1447 rt->u.dst.neighbour = NULL; 1453 rt->u.dst.neighbour = NULL;
@@ -1477,7 +1483,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1477 &netevent); 1483 &netevent);
1478 1484
1479 rt_del(hash, rth); 1485 rt_del(hash, rth);
1480 if (!rt_intern_hash(hash, rt, &rt, NULL)) 1486 if (!rt_intern_hash(hash, rt, &rt, NULL, rt->fl.oif))
1481 ip_rt_put(rt); 1487 ip_rt_put(rt);
1482 goto do_next; 1488 goto do_next;
1483 } 1489 }
@@ -1506,11 +1512,12 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1506 struct dst_entry *ret = dst; 1512 struct dst_entry *ret = dst;
1507 1513
1508 if (rt) { 1514 if (rt) {
1509 if (dst->obsolete) { 1515 if (dst->obsolete > 0) {
1510 ip_rt_put(rt); 1516 ip_rt_put(rt);
1511 ret = NULL; 1517 ret = NULL;
1512 } else if ((rt->rt_flags & RTCF_REDIRECTED) || 1518 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
1513 rt->u.dst.expires) { 1519 (rt->u.dst.expires &&
1520 time_after_eq(jiffies, rt->u.dst.expires))) {
1514 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, 1521 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
1515 rt->fl.oif, 1522 rt->fl.oif,
1516 rt_genid(dev_net(dst->dev))); 1523 rt_genid(dev_net(dst->dev)));
@@ -1726,7 +1733,9 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1726 1733
1727static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) 1734static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1728{ 1735{
1729 return NULL; 1736 if (rt_is_expired((struct rtable *)dst))
1737 return NULL;
1738 return dst;
1730} 1739}
1731 1740
1732static void ipv4_dst_destroy(struct dst_entry *dst) 1741static void ipv4_dst_destroy(struct dst_entry *dst)
@@ -1888,7 +1897,8 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1888 if (!rth) 1897 if (!rth)
1889 goto e_nobufs; 1898 goto e_nobufs;
1890 1899
1891 rth->u.dst.output= ip_rt_bug; 1900 rth->u.dst.output = ip_rt_bug;
1901 rth->u.dst.obsolete = -1;
1892 1902
1893 atomic_set(&rth->u.dst.__refcnt, 1); 1903 atomic_set(&rth->u.dst.__refcnt, 1);
1894 rth->u.dst.flags= DST_HOST; 1904 rth->u.dst.flags= DST_HOST;
@@ -1927,7 +1937,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1927 1937
1928 in_dev_put(in_dev); 1938 in_dev_put(in_dev);
1929 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); 1939 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
1930 return rt_intern_hash(hash, rth, NULL, skb); 1940 return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex);
1931 1941
1932e_nobufs: 1942e_nobufs:
1933 in_dev_put(in_dev); 1943 in_dev_put(in_dev);
@@ -2054,6 +2064,7 @@ static int __mkroute_input(struct sk_buff *skb,
2054 rth->fl.oif = 0; 2064 rth->fl.oif = 0;
2055 rth->rt_spec_dst= spec_dst; 2065 rth->rt_spec_dst= spec_dst;
2056 2066
2067 rth->u.dst.obsolete = -1;
2057 rth->u.dst.input = ip_forward; 2068 rth->u.dst.input = ip_forward;
2058 rth->u.dst.output = ip_output; 2069 rth->u.dst.output = ip_output;
2059 rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev)); 2070 rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev));
@@ -2093,7 +2104,7 @@ static int ip_mkroute_input(struct sk_buff *skb,
2093 /* put it into the cache */ 2104 /* put it into the cache */
2094 hash = rt_hash(daddr, saddr, fl->iif, 2105 hash = rt_hash(daddr, saddr, fl->iif,
2095 rt_genid(dev_net(rth->u.dst.dev))); 2106 rt_genid(dev_net(rth->u.dst.dev)));
2096 return rt_intern_hash(hash, rth, NULL, skb); 2107 return rt_intern_hash(hash, rth, NULL, skb, fl->iif);
2097} 2108}
2098 2109
2099/* 2110/*
@@ -2218,6 +2229,7 @@ local_input:
2218 goto e_nobufs; 2229 goto e_nobufs;
2219 2230
2220 rth->u.dst.output= ip_rt_bug; 2231 rth->u.dst.output= ip_rt_bug;
2232 rth->u.dst.obsolete = -1;
2221 rth->rt_genid = rt_genid(net); 2233 rth->rt_genid = rt_genid(net);
2222 2234
2223 atomic_set(&rth->u.dst.__refcnt, 1); 2235 atomic_set(&rth->u.dst.__refcnt, 1);
@@ -2249,7 +2261,7 @@ local_input:
2249 } 2261 }
2250 rth->rt_type = res.type; 2262 rth->rt_type = res.type;
2251 hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net)); 2263 hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
2252 err = rt_intern_hash(hash, rth, NULL, skb); 2264 err = rt_intern_hash(hash, rth, NULL, skb, fl.iif);
2253 goto done; 2265 goto done;
2254 2266
2255no_route: 2267no_route:
@@ -2444,6 +2456,7 @@ static int __mkroute_output(struct rtable **result,
2444 rth->rt_spec_dst= fl->fl4_src; 2456 rth->rt_spec_dst= fl->fl4_src;
2445 2457
2446 rth->u.dst.output=ip_output; 2458 rth->u.dst.output=ip_output;
2459 rth->u.dst.obsolete = -1;
2447 rth->rt_genid = rt_genid(dev_net(dev_out)); 2460 rth->rt_genid = rt_genid(dev_net(dev_out));
2448 2461
2449 RT_CACHE_STAT_INC(out_slow_tot); 2462 RT_CACHE_STAT_INC(out_slow_tot);
@@ -2495,7 +2508,7 @@ static int ip_mkroute_output(struct rtable **rp,
2495 if (err == 0) { 2508 if (err == 0) {
2496 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif, 2509 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
2497 rt_genid(dev_net(dev_out))); 2510 rt_genid(dev_net(dev_out)));
2498 err = rt_intern_hash(hash, rth, rp, NULL); 2511 err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
2499 } 2512 }
2500 2513
2501 return err; 2514 return err;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index c1bc074f61b7..1cd5c15174b8 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -12,6 +12,7 @@
12#include <linux/inetdevice.h> 12#include <linux/inetdevice.h>
13#include <linux/seqlock.h> 13#include <linux/seqlock.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/slab.h>
15#include <net/snmp.h> 16#include <net/snmp.h>
16#include <net/icmp.h> 17#include <net/icmp.h>
17#include <net/ip.h> 18#include <net/ip.h>
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5901010fad55..0f8caf64caa3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -265,6 +265,7 @@
265#include <linux/err.h> 265#include <linux/err.h>
266#include <linux/crypto.h> 266#include <linux/crypto.h>
267#include <linux/time.h> 267#include <linux/time.h>
268#include <linux/slab.h>
268 269
269#include <net/icmp.h> 270#include <net/icmp.h>
270#include <net/tcp.h> 271#include <net/tcp.h>
@@ -429,7 +430,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
429 if (tp->urg_seq == tp->copied_seq && 430 if (tp->urg_seq == tp->copied_seq &&
430 !sock_flag(sk, SOCK_URGINLINE) && 431 !sock_flag(sk, SOCK_URGINLINE) &&
431 tp->urg_data) 432 tp->urg_data)
432 target--; 433 target++;
433 434
434 /* Potential race condition. If read of tp below will 435 /* Potential race condition. If read of tp below will
435 * escape above sk->sk_state, we can be illegally awaken 436 * escape above sk->sk_state, we can be illegally awaken
@@ -1254,6 +1255,39 @@ static void tcp_prequeue_process(struct sock *sk)
1254 tp->ucopy.memory = 0; 1255 tp->ucopy.memory = 0;
1255} 1256}
1256 1257
1258#ifdef CONFIG_NET_DMA
1259static void tcp_service_net_dma(struct sock *sk, bool wait)
1260{
1261 dma_cookie_t done, used;
1262 dma_cookie_t last_issued;
1263 struct tcp_sock *tp = tcp_sk(sk);
1264
1265 if (!tp->ucopy.dma_chan)
1266 return;
1267
1268 last_issued = tp->ucopy.dma_cookie;
1269 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1270
1271 do {
1272 if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1273 last_issued, &done,
1274 &used) == DMA_SUCCESS) {
1275 /* Safe to free early-copied skbs now */
1276 __skb_queue_purge(&sk->sk_async_wait_queue);
1277 break;
1278 } else {
1279 struct sk_buff *skb;
1280 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1281 (dma_async_is_complete(skb->dma_cookie, done,
1282 used) == DMA_SUCCESS)) {
1283 __skb_dequeue(&sk->sk_async_wait_queue);
1284 kfree_skb(skb);
1285 }
1286 }
1287 } while (wait);
1288}
1289#endif
1290
1257static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 1291static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1258{ 1292{
1259 struct sk_buff *skb; 1293 struct sk_buff *skb;
@@ -1335,6 +1369,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1335 sk_eat_skb(sk, skb, 0); 1369 sk_eat_skb(sk, skb, 0);
1336 if (!desc->count) 1370 if (!desc->count)
1337 break; 1371 break;
1372 tp->copied_seq = seq;
1338 } 1373 }
1339 tp->copied_seq = seq; 1374 tp->copied_seq = seq;
1340 1375
@@ -1546,6 +1581,10 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1546 /* __ Set realtime policy in scheduler __ */ 1581 /* __ Set realtime policy in scheduler __ */
1547 } 1582 }
1548 1583
1584#ifdef CONFIG_NET_DMA
1585 if (tp->ucopy.dma_chan)
1586 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1587#endif
1549 if (copied >= target) { 1588 if (copied >= target) {
1550 /* Do not sleep, just process backlog. */ 1589 /* Do not sleep, just process backlog. */
1551 release_sock(sk); 1590 release_sock(sk);
@@ -1554,6 +1593,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1554 sk_wait_data(sk, &timeo); 1593 sk_wait_data(sk, &timeo);
1555 1594
1556#ifdef CONFIG_NET_DMA 1595#ifdef CONFIG_NET_DMA
1596 tcp_service_net_dma(sk, false); /* Don't block */
1557 tp->ucopy.wakeup = 0; 1597 tp->ucopy.wakeup = 0;
1558#endif 1598#endif
1559 1599
@@ -1633,6 +1673,9 @@ do_prequeue:
1633 copied = -EFAULT; 1673 copied = -EFAULT;
1634 break; 1674 break;
1635 } 1675 }
1676
1677 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1678
1636 if ((offset + used) == skb->len) 1679 if ((offset + used) == skb->len)
1637 copied_early = 1; 1680 copied_early = 1;
1638 1681
@@ -1702,27 +1745,9 @@ skip_copy:
1702 } 1745 }
1703 1746
1704#ifdef CONFIG_NET_DMA 1747#ifdef CONFIG_NET_DMA
1705 if (tp->ucopy.dma_chan) { 1748 tcp_service_net_dma(sk, true); /* Wait for queue to drain */
1706 dma_cookie_t done, used; 1749 tp->ucopy.dma_chan = NULL;
1707
1708 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1709
1710 while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1711 tp->ucopy.dma_cookie, &done,
1712 &used) == DMA_IN_PROGRESS) {
1713 /* do partial cleanup of sk_async_wait_queue */
1714 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1715 (dma_async_is_complete(skb->dma_cookie, done,
1716 used) == DMA_SUCCESS)) {
1717 __skb_dequeue(&sk->sk_async_wait_queue);
1718 kfree_skb(skb);
1719 }
1720 }
1721 1750
1722 /* Safe to free early-copied skbs now */
1723 __skb_queue_purge(&sk->sk_async_wait_queue);
1724 tp->ucopy.dma_chan = NULL;
1725 }
1726 if (tp->ucopy.pinned_list) { 1751 if (tp->ucopy.pinned_list) {
1727 dma_unpin_iovec_pages(tp->ucopy.pinned_list); 1752 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1728 tp->ucopy.pinned_list = NULL; 1753 tp->ucopy.pinned_list = NULL;
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 6428b342b164..0ec9bd0ae94f 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -10,6 +10,7 @@
10#include <linux/mm.h> 10#include <linux/mm.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/list.h> 12#include <linux/list.h>
13#include <linux/gfp.h>
13#include <net/tcp.h> 14#include <net/tcp.h>
14 15
15int sysctl_tcp_max_ssthresh = 0; 16int sysctl_tcp_max_ssthresh = 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 788851ca8c5d..ae3ec15fb630 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -62,6 +62,7 @@
62 */ 62 */
63 63
64#include <linux/mm.h> 64#include <linux/mm.h>
65#include <linux/slab.h>
65#include <linux/module.h> 66#include <linux/module.h>
66#include <linux/sysctl.h> 67#include <linux/sysctl.h>
67#include <linux/kernel.h> 68#include <linux/kernel.h>
@@ -2511,6 +2512,9 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
2511 int err; 2512 int err;
2512 unsigned int mss; 2513 unsigned int mss;
2513 2514
2515 if (packets == 0)
2516 return;
2517
2514 WARN_ON(packets > tp->packets_out); 2518 WARN_ON(packets > tp->packets_out);
2515 if (tp->lost_skb_hint) { 2519 if (tp->lost_skb_hint) {
2516 skb = tp->lost_skb_hint; 2520 skb = tp->lost_skb_hint;
@@ -3706,7 +3710,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
3706 } 3710 }
3707 3711
3708 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) 3712 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
3709 dst_confirm(sk->sk_dst_cache); 3713 dst_confirm(__sk_dst_get(sk));
3710 3714
3711 return 1; 3715 return 1;
3712 3716
@@ -4315,7 +4319,7 @@ static void tcp_ofo_queue(struct sock *sk)
4315 } 4319 }
4316 4320
4317 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 4321 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
4318 SOCK_DEBUG(sk, "ofo packet was already received \n"); 4322 SOCK_DEBUG(sk, "ofo packet was already received\n");
4319 __skb_unlink(skb, &tp->out_of_order_queue); 4323 __skb_unlink(skb, &tp->out_of_order_queue);
4320 __kfree_skb(skb); 4324 __kfree_skb(skb);
4321 continue; 4325 continue;
@@ -5829,7 +5833,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5829 if (tp->snd_una == tp->write_seq) { 5833 if (tp->snd_una == tp->write_seq) {
5830 tcp_set_state(sk, TCP_FIN_WAIT2); 5834 tcp_set_state(sk, TCP_FIN_WAIT2);
5831 sk->sk_shutdown |= SEND_SHUTDOWN; 5835 sk->sk_shutdown |= SEND_SHUTDOWN;
5832 dst_confirm(sk->sk_dst_cache); 5836 dst_confirm(__sk_dst_get(sk));
5833 5837
5834 if (!sock_flag(sk, SOCK_DEAD)) 5838 if (!sock_flag(sk, SOCK_DEAD))
5835 /* Wake up lingering close() */ 5839 /* Wake up lingering close() */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 70df40980a87..ad08392a738c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -60,6 +60,7 @@
60#include <linux/jhash.h> 60#include <linux/jhash.h>
61#include <linux/init.h> 61#include <linux/init.h>
62#include <linux/times.h> 62#include <linux/times.h>
63#include <linux/slab.h>
63 64
64#include <net/net_namespace.h> 65#include <net/net_namespace.h>
65#include <net/icmp.h> 66#include <net/icmp.h>
@@ -370,6 +371,11 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
370 if (sk->sk_state == TCP_CLOSE) 371 if (sk->sk_state == TCP_CLOSE)
371 goto out; 372 goto out;
372 373
374 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
375 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
376 goto out;
377 }
378
373 icsk = inet_csk(sk); 379 icsk = inet_csk(sk);
374 tp = tcp_sk(sk); 380 tp = tcp_sk(sk);
375 seq = ntohl(th->seq); 381 seq = ntohl(th->seq);
@@ -513,26 +519,31 @@ out:
513 sock_put(sk); 519 sock_put(sk);
514} 520}
515 521
516/* This routine computes an IPv4 TCP checksum. */ 522static void __tcp_v4_send_check(struct sk_buff *skb,
517void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb) 523 __be32 saddr, __be32 daddr)
518{ 524{
519 struct inet_sock *inet = inet_sk(sk);
520 struct tcphdr *th = tcp_hdr(skb); 525 struct tcphdr *th = tcp_hdr(skb);
521 526
522 if (skb->ip_summed == CHECKSUM_PARTIAL) { 527 if (skb->ip_summed == CHECKSUM_PARTIAL) {
523 th->check = ~tcp_v4_check(len, inet->inet_saddr, 528 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
524 inet->inet_daddr, 0);
525 skb->csum_start = skb_transport_header(skb) - skb->head; 529 skb->csum_start = skb_transport_header(skb) - skb->head;
526 skb->csum_offset = offsetof(struct tcphdr, check); 530 skb->csum_offset = offsetof(struct tcphdr, check);
527 } else { 531 } else {
528 th->check = tcp_v4_check(len, inet->inet_saddr, 532 th->check = tcp_v4_check(skb->len, saddr, daddr,
529 inet->inet_daddr,
530 csum_partial(th, 533 csum_partial(th,
531 th->doff << 2, 534 th->doff << 2,
532 skb->csum)); 535 skb->csum));
533 } 536 }
534} 537}
535 538
539/* This routine computes an IPv4 TCP checksum. */
540void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
541{
542 struct inet_sock *inet = inet_sk(sk);
543
544 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
545}
546
536int tcp_v4_gso_send_check(struct sk_buff *skb) 547int tcp_v4_gso_send_check(struct sk_buff *skb)
537{ 548{
538 const struct iphdr *iph; 549 const struct iphdr *iph;
@@ -545,10 +556,8 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
545 th = tcp_hdr(skb); 556 th = tcp_hdr(skb);
546 557
547 th->check = 0; 558 th->check = 0;
548 th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
549 skb->csum_start = skb_transport_header(skb) - skb->head;
550 skb->csum_offset = offsetof(struct tcphdr, check);
551 skb->ip_summed = CHECKSUM_PARTIAL; 559 skb->ip_summed = CHECKSUM_PARTIAL;
560 __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
552 return 0; 561 return 0;
553} 562}
554 563
@@ -757,13 +766,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
757 skb = tcp_make_synack(sk, dst, req, rvp); 766 skb = tcp_make_synack(sk, dst, req, rvp);
758 767
759 if (skb) { 768 if (skb) {
760 struct tcphdr *th = tcp_hdr(skb); 769 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
761
762 th->check = tcp_v4_check(skb->len,
763 ireq->loc_addr,
764 ireq->rmt_addr,
765 csum_partial(th, skb->len,
766 skb->csum));
767 770
768 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, 771 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
769 ireq->rmt_addr, 772 ireq->rmt_addr,
@@ -1669,6 +1672,8 @@ process:
1669 1672
1670 skb->dev = NULL; 1673 skb->dev = NULL;
1671 1674
1675 inet_rps_save_rxhash(sk, skb->rxhash);
1676
1672 bh_lock_sock_nested(sk); 1677 bh_lock_sock_nested(sk);
1673 ret = 0; 1678 ret = 0;
1674 if (!sock_owned_by_user(sk)) { 1679 if (!sock_owned_by_user(sk)) {
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 4199bc6915c5..794c2e122a41 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -20,6 +20,7 @@
20 20
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/slab.h>
23#include <linux/sysctl.h> 24#include <linux/sysctl.h>
24#include <linux/workqueue.h> 25#include <linux/workqueue.h>
25#include <net/tcp.h> 26#include <net/tcp.h>
@@ -671,6 +672,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
671 if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && 672 if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
672 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { 673 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
673 inet_rsk(req)->acked = 1; 674 inet_rsk(req)->acked = 1;
675 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
674 return NULL; 676 return NULL;
675 } 677 }
676 678
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f181b78f2385..2b7d71fb8439 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -37,6 +37,7 @@
37#include <net/tcp.h> 37#include <net/tcp.h>
38 38
39#include <linux/compiler.h> 39#include <linux/compiler.h>
40#include <linux/gfp.h>
40#include <linux/module.h> 41#include <linux/module.h>
41 42
42/* People can turn this off for buggy TCP's found in printers etc. */ 43/* People can turn this off for buggy TCP's found in printers etc. */
@@ -349,6 +350,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
349 */ 350 */
350static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) 351static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
351{ 352{
353 skb->ip_summed = CHECKSUM_PARTIAL;
352 skb->csum = 0; 354 skb->csum = 0;
353 355
354 TCP_SKB_CB(skb)->flags = flags; 356 TCP_SKB_CB(skb)->flags = flags;
@@ -877,7 +879,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
877 } 879 }
878#endif 880#endif
879 881
880 icsk->icsk_af_ops->send_check(sk, skb->len, skb); 882 icsk->icsk_af_ops->send_check(sk, skb);
881 883
882 if (likely(tcb->flags & TCPCB_FLAG_ACK)) 884 if (likely(tcb->flags & TCPCB_FLAG_ACK))
883 tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); 885 tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
@@ -888,7 +890,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
888 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) 890 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
889 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); 891 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
890 892
891 err = icsk->icsk_af_ops->queue_xmit(skb, 0); 893 err = icsk->icsk_af_ops->queue_xmit(skb);
892 if (likely(err <= 0)) 894 if (likely(err <= 0))
893 return err; 895 return err;
894 896
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 9bc805df95d2..f8efada580e8 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -22,6 +22,7 @@
22#include <linux/kprobes.h> 22#include <linux/kprobes.h>
23#include <linux/socket.h> 23#include <linux/socket.h>
24#include <linux/tcp.h> 24#include <linux/tcp.h>
25#include <linux/slab.h>
25#include <linux/proc_fs.h> 26#include <linux/proc_fs.h>
26#include <linux/module.h> 27#include <linux/module.h>
27#include <linux/ktime.h> 28#include <linux/ktime.h>
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index b2e6bbccaee1..c732be00606b 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -19,6 +19,7 @@
19 */ 19 */
20 20
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/gfp.h>
22#include <net/tcp.h> 23#include <net/tcp.h>
23 24
24int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES; 25int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
@@ -171,14 +172,14 @@ static int tcp_write_timeout(struct sock *sk)
171 172
172 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 173 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
173 if (icsk->icsk_retransmits) 174 if (icsk->icsk_retransmits)
174 dst_negative_advice(&sk->sk_dst_cache, sk); 175 dst_negative_advice(sk);
175 retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; 176 retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
176 } else { 177 } else {
177 if (retransmits_timed_out(sk, sysctl_tcp_retries1)) { 178 if (retransmits_timed_out(sk, sysctl_tcp_retries1)) {
178 /* Black hole detection */ 179 /* Black hole detection */
179 tcp_mtu_probing(icsk, sk); 180 tcp_mtu_probing(icsk, sk);
180 181
181 dst_negative_advice(&sk->sk_dst_cache, sk); 182 dst_negative_advice(sk);
182 } 183 }
183 184
184 retry_until = sysctl_tcp_retries2; 185 retry_until = sysctl_tcp_retries2;
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index 3959e0ca456a..3b3813cc80b9 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -8,6 +8,7 @@
8#include <linux/mutex.h> 8#include <linux/mutex.h>
9#include <linux/netdevice.h> 9#include <linux/netdevice.h>
10#include <linux/skbuff.h> 10#include <linux/skbuff.h>
11#include <linux/slab.h>
11#include <net/icmp.h> 12#include <net/icmp.h>
12#include <net/ip.h> 13#include <net/ip.h>
13#include <net/protocol.h> 14#include <net/protocol.h>
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 7af756d0f931..666b963496ff 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -95,6 +95,7 @@
95#include <linux/mm.h> 95#include <linux/mm.h>
96#include <linux/inet.h> 96#include <linux/inet.h>
97#include <linux/netdevice.h> 97#include <linux/netdevice.h>
98#include <linux/slab.h>
98#include <net/tcp_states.h> 99#include <net/tcp_states.h>
99#include <linux/skbuff.h> 100#include <linux/skbuff.h>
100#include <linux/proc_fs.h> 101#include <linux/proc_fs.h>
@@ -471,8 +472,8 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
471 if (hslot->count < hslot2->count) 472 if (hslot->count < hslot2->count)
472 goto begin; 473 goto begin;
473 474
474 result = udp4_lib_lookup2(net, INADDR_ANY, sport, 475 result = udp4_lib_lookup2(net, saddr, sport,
475 daddr, hnum, dif, 476 INADDR_ANY, hnum, dif,
476 hslot2, slot2); 477 hslot2, slot2);
477 } 478 }
478 rcu_read_unlock(); 479 rcu_read_unlock();
@@ -1216,6 +1217,7 @@ int udp_disconnect(struct sock *sk, int flags)
1216 sk->sk_state = TCP_CLOSE; 1217 sk->sk_state = TCP_CLOSE;
1217 inet->inet_daddr = 0; 1218 inet->inet_daddr = 0;
1218 inet->inet_dport = 0; 1219 inet->inet_dport = 0;
1220 inet_rps_save_rxhash(sk, 0);
1219 sk->sk_bound_dev_if = 0; 1221 sk->sk_bound_dev_if = 0;
1220 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) 1222 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1221 inet_reset_saddr(sk); 1223 inet_reset_saddr(sk);
@@ -1257,8 +1259,12 @@ EXPORT_SYMBOL(udp_lib_unhash);
1257 1259
1258static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 1260static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1259{ 1261{
1260 int rc = sock_queue_rcv_skb(sk, skb); 1262 int rc;
1263
1264 if (inet_sk(sk)->inet_daddr)
1265 inet_rps_save_rxhash(sk, skb->rxhash);
1261 1266
1267 rc = sock_queue_rcv_skb(sk, skb);
1262 if (rc < 0) { 1268 if (rc < 0) {
1263 int is_udplite = IS_UDPLITE(sk); 1269 int is_udplite = IS_UDPLITE(sk);
1264 1270
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index c3969e0f96c3..abcd7ed65db1 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -9,6 +9,7 @@
9 * 9 *
10 */ 10 */
11 11
12#include <linux/slab.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/string.h> 14#include <linux/string.h>
14#include <linux/netfilter.h> 15#include <linux/netfilter.h>
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 3444f3b34eca..6f368413eb0e 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -4,6 +4,7 @@
4 * Copyright (c) 2004-2006 Herbert Xu <herbert@gondor.apana.org.au> 4 * Copyright (c) 2004-2006 Herbert Xu <herbert@gondor.apana.org.au>
5 */ 5 */
6 6
7#include <linux/gfp.h>
7#include <linux/init.h> 8#include <linux/init.h>
8#include <linux/kernel.h> 9#include <linux/kernel.h>
9#include <linux/module.h> 10#include <linux/module.h>
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index e4a1483fba77..1705476670ef 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -59,27 +59,6 @@ static int xfrm4_get_saddr(struct net *net,
59 return 0; 59 return 0;
60} 60}
61 61
62static struct dst_entry *
63__xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
64{
65 struct dst_entry *dst;
66
67 read_lock_bh(&policy->lock);
68 for (dst = policy->bundles; dst; dst = dst->next) {
69 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
70 if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/
71 xdst->u.rt.fl.fl4_dst == fl->fl4_dst &&
72 xdst->u.rt.fl.fl4_src == fl->fl4_src &&
73 xdst->u.rt.fl.fl4_tos == fl->fl4_tos &&
74 xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) {
75 dst_clone(dst);
76 break;
77 }
78 }
79 read_unlock_bh(&policy->lock);
80 return dst;
81}
82
83static int xfrm4_get_tos(struct flowi *fl) 62static int xfrm4_get_tos(struct flowi *fl)
84{ 63{
85 return fl->fl4_tos; 64 return fl->fl4_tos;
@@ -259,7 +238,6 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
259 .dst_ops = &xfrm4_dst_ops, 238 .dst_ops = &xfrm4_dst_ops,
260 .dst_lookup = xfrm4_dst_lookup, 239 .dst_lookup = xfrm4_dst_lookup,
261 .get_saddr = xfrm4_get_saddr, 240 .get_saddr = xfrm4_get_saddr,
262 .find_bundle = __xfrm4_find_bundle,
263 .decode_session = _decode_session4, 241 .decode_session = _decode_session4,
264 .get_tos = xfrm4_get_tos, 242 .get_tos = xfrm4_get_tos,
265 .init_path = xfrm4_init_path, 243 .init_path = xfrm4_init_path,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3381b4317c27..7cba8845242f 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -53,6 +53,7 @@
53#include <linux/route.h> 53#include <linux/route.h>
54#include <linux/inetdevice.h> 54#include <linux/inetdevice.h>
55#include <linux/init.h> 55#include <linux/init.h>
56#include <linux/slab.h>
56#ifdef CONFIG_SYSCTL 57#ifdef CONFIG_SYSCTL
57#include <linux/sysctl.h> 58#include <linux/sysctl.h>
58#endif 59#endif
@@ -81,7 +82,7 @@
81#include <linux/random.h> 82#include <linux/random.h>
82#endif 83#endif
83 84
84#include <asm/uaccess.h> 85#include <linux/uaccess.h>
85#include <asm/unaligned.h> 86#include <asm/unaligned.h>
86 87
87#include <linux/proc_fs.h> 88#include <linux/proc_fs.h>
@@ -97,7 +98,11 @@
97#endif 98#endif
98 99
99#define INFINITY_LIFE_TIME 0xFFFFFFFF 100#define INFINITY_LIFE_TIME 0xFFFFFFFF
100#define TIME_DELTA(a,b) ((unsigned long)((long)(a) - (long)(b))) 101#define TIME_DELTA(a, b) ((unsigned long)((long)(a) - (long)(b)))
102
103#define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? HZ/50 : 1)
104#define ADDRCONF_TIMER_FUZZ (HZ / 4)
105#define ADDRCONF_TIMER_FUZZ_MAX (HZ)
101 106
102#ifdef CONFIG_SYSCTL 107#ifdef CONFIG_SYSCTL
103static void addrconf_sysctl_register(struct inet6_dev *idev); 108static void addrconf_sysctl_register(struct inet6_dev *idev);
@@ -126,8 +131,8 @@ static int ipv6_count_addresses(struct inet6_dev *idev);
126/* 131/*
127 * Configured unicast address hash table 132 * Configured unicast address hash table
128 */ 133 */
129static struct inet6_ifaddr *inet6_addr_lst[IN6_ADDR_HSIZE]; 134static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
130static DEFINE_RWLOCK(addrconf_hash_lock); 135static DEFINE_SPINLOCK(addrconf_hash_lock);
131 136
132static void addrconf_verify(unsigned long); 137static void addrconf_verify(unsigned long);
133 138
@@ -137,8 +142,8 @@ static DEFINE_SPINLOCK(addrconf_verify_lock);
137static void addrconf_join_anycast(struct inet6_ifaddr *ifp); 142static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
138static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); 143static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
139 144
140static void addrconf_bonding_change(struct net_device *dev, 145static void addrconf_type_change(struct net_device *dev,
141 unsigned long event); 146 unsigned long event);
142static int addrconf_ifdown(struct net_device *dev, int how); 147static int addrconf_ifdown(struct net_device *dev, int how);
143 148
144static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags); 149static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags);
@@ -151,8 +156,8 @@ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
151 156
152static void inet6_prefix_notify(int event, struct inet6_dev *idev, 157static void inet6_prefix_notify(int event, struct inet6_dev *idev,
153 struct prefix_info *pinfo); 158 struct prefix_info *pinfo);
154static int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, 159static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
155 struct net_device *dev); 160 struct net_device *dev);
156 161
157static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); 162static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
158 163
@@ -249,8 +254,7 @@ static void addrconf_del_timer(struct inet6_ifaddr *ifp)
249 __in6_ifa_put(ifp); 254 __in6_ifa_put(ifp);
250} 255}
251 256
252enum addrconf_timer_t 257enum addrconf_timer_t {
253{
254 AC_NONE, 258 AC_NONE,
255 AC_DAD, 259 AC_DAD,
256 AC_RS, 260 AC_RS,
@@ -270,7 +274,8 @@ static void addrconf_mod_timer(struct inet6_ifaddr *ifp,
270 case AC_RS: 274 case AC_RS:
271 ifp->timer.function = addrconf_rs_timer; 275 ifp->timer.function = addrconf_rs_timer;
272 break; 276 break;
273 default:; 277 default:
278 break;
274 } 279 }
275 ifp->timer.expires = jiffies + when; 280 ifp->timer.expires = jiffies + when;
276 add_timer(&ifp->timer); 281 add_timer(&ifp->timer);
@@ -317,7 +322,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
317{ 322{
318 struct net_device *dev = idev->dev; 323 struct net_device *dev = idev->dev;
319 324
320 WARN_ON(idev->addr_list != NULL); 325 WARN_ON(!list_empty(&idev->addr_list));
321 WARN_ON(idev->mc_list != NULL); 326 WARN_ON(idev->mc_list != NULL);
322 327
323#ifdef NET_REFCNT_DEBUG 328#ifdef NET_REFCNT_DEBUG
@@ -325,7 +330,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
325#endif 330#endif
326 dev_put(dev); 331 dev_put(dev);
327 if (!idev->dead) { 332 if (!idev->dead) {
328 printk("Freeing alive inet6 device %p\n", idev); 333 pr_warning("Freeing alive inet6 device %p\n", idev);
329 return; 334 return;
330 } 335 }
331 snmp6_free_dev(idev); 336 snmp6_free_dev(idev);
@@ -350,6 +355,8 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
350 355
351 rwlock_init(&ndev->lock); 356 rwlock_init(&ndev->lock);
352 ndev->dev = dev; 357 ndev->dev = dev;
358 INIT_LIST_HEAD(&ndev->addr_list);
359
353 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); 360 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
354 ndev->cnf.mtu6 = dev->mtu; 361 ndev->cnf.mtu6 = dev->mtu;
355 ndev->cnf.sysctl = NULL; 362 ndev->cnf.sysctl = NULL;
@@ -401,6 +408,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
401#endif 408#endif
402 409
403#ifdef CONFIG_IPV6_PRIVACY 410#ifdef CONFIG_IPV6_PRIVACY
411 INIT_LIST_HEAD(&ndev->tempaddr_list);
404 setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev); 412 setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev);
405 if ((dev->flags&IFF_LOOPBACK) || 413 if ((dev->flags&IFF_LOOPBACK) ||
406 dev->type == ARPHRD_TUNNEL || 414 dev->type == ARPHRD_TUNNEL ||
@@ -438,8 +446,10 @@ static struct inet6_dev * ipv6_find_idev(struct net_device *dev)
438 446
439 ASSERT_RTNL(); 447 ASSERT_RTNL();
440 448
441 if ((idev = __in6_dev_get(dev)) == NULL) { 449 idev = __in6_dev_get(dev);
442 if ((idev = ipv6_add_dev(dev)) == NULL) 450 if (!idev) {
451 idev = ipv6_add_dev(dev);
452 if (!idev)
443 return NULL; 453 return NULL;
444 } 454 }
445 455
@@ -465,7 +475,8 @@ static void dev_forward_change(struct inet6_dev *idev)
465 else 475 else
466 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters); 476 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
467 } 477 }
468 for (ifa=idev->addr_list; ifa; ifa=ifa->if_next) { 478
479 list_for_each_entry(ifa, &idev->addr_list, if_list) {
469 if (ifa->flags&IFA_F_TENTATIVE) 480 if (ifa->flags&IFA_F_TENTATIVE)
470 continue; 481 continue;
471 if (idev->cnf.forwarding) 482 if (idev->cnf.forwarding)
@@ -522,12 +533,16 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
522} 533}
523#endif 534#endif
524 535
525/* Nobody refers to this ifaddr, destroy it */ 536static void inet6_ifa_finish_destroy_rcu(struct rcu_head *head)
537{
538 struct inet6_ifaddr *ifp = container_of(head, struct inet6_ifaddr, rcu);
539 kfree(ifp);
540}
526 541
542/* Nobody refers to this ifaddr, destroy it */
527void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) 543void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
528{ 544{
529 WARN_ON(ifp->if_next != NULL); 545 WARN_ON(!hlist_unhashed(&ifp->addr_lst));
530 WARN_ON(ifp->lst_next != NULL);
531 546
532#ifdef NET_REFCNT_DEBUG 547#ifdef NET_REFCNT_DEBUG
533 printk(KERN_DEBUG "inet6_ifa_finish_destroy\n"); 548 printk(KERN_DEBUG "inet6_ifa_finish_destroy\n");
@@ -536,54 +551,45 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
536 in6_dev_put(ifp->idev); 551 in6_dev_put(ifp->idev);
537 552
538 if (del_timer(&ifp->timer)) 553 if (del_timer(&ifp->timer))
539 printk("Timer is still running, when freeing ifa=%p\n", ifp); 554 pr_notice("Timer is still running, when freeing ifa=%p\n", ifp);
540 555
541 if (!ifp->dead) { 556 if (!ifp->dead) {
542 printk("Freeing alive inet6 address %p\n", ifp); 557 pr_warning("Freeing alive inet6 address %p\n", ifp);
543 return; 558 return;
544 } 559 }
545 dst_release(&ifp->rt->u.dst); 560 dst_release(&ifp->rt->u.dst);
546 561
547 kfree(ifp); 562 call_rcu(&ifp->rcu, inet6_ifa_finish_destroy_rcu);
548} 563}
549 564
550static void 565static void
551ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp) 566ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
552{ 567{
553 struct inet6_ifaddr *ifa, **ifap; 568 struct list_head *p;
554 int ifp_scope = ipv6_addr_src_scope(&ifp->addr); 569 int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
555 570
556 /* 571 /*
557 * Each device address list is sorted in order of scope - 572 * Each device address list is sorted in order of scope -
558 * global before linklocal. 573 * global before linklocal.
559 */ 574 */
560 for (ifap = &idev->addr_list; (ifa = *ifap) != NULL; 575 list_for_each(p, &idev->addr_list) {
561 ifap = &ifa->if_next) { 576 struct inet6_ifaddr *ifa
577 = list_entry(p, struct inet6_ifaddr, if_list);
562 if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr)) 578 if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
563 break; 579 break;
564 } 580 }
565 581
566 ifp->if_next = *ifap; 582 list_add_tail(&ifp->if_list, p);
567 *ifap = ifp;
568} 583}
569 584
570/* 585static u32 ipv6_addr_hash(const struct in6_addr *addr)
571 * Hash function taken from net_alias.c
572 */
573static u8 ipv6_addr_hash(const struct in6_addr *addr)
574{ 586{
575 __u32 word;
576
577 /* 587 /*
578 * We perform the hash function over the last 64 bits of the address 588 * We perform the hash function over the last 64 bits of the address
579 * This will include the IEEE address token on links that support it. 589 * This will include the IEEE address token on links that support it.
580 */ 590 */
581 591 return jhash_2words(addr->s6_addr32[2], addr->s6_addr32[3], 0)
582 word = (__force u32)(addr->s6_addr32[2] ^ addr->s6_addr32[3]); 592 & (IN6_ADDR_HSIZE - 1);
583 word ^= (word >> 16);
584 word ^= (word >> 8);
585
586 return ((word ^ (word >> 4)) & 0x0f);
587} 593}
588 594
589/* On success it returns ifp with increased reference count */ 595/* On success it returns ifp with increased reference count */
@@ -594,7 +600,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
594{ 600{
595 struct inet6_ifaddr *ifa = NULL; 601 struct inet6_ifaddr *ifa = NULL;
596 struct rt6_info *rt; 602 struct rt6_info *rt;
597 int hash; 603 unsigned int hash;
598 int err = 0; 604 int err = 0;
599 int addr_type = ipv6_addr_type(addr); 605 int addr_type = ipv6_addr_type(addr);
600 606
@@ -615,7 +621,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
615 goto out2; 621 goto out2;
616 } 622 }
617 623
618 write_lock(&addrconf_hash_lock); 624 spin_lock(&addrconf_hash_lock);
619 625
620 /* Ignore adding duplicate addresses on an interface */ 626 /* Ignore adding duplicate addresses on an interface */
621 if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) { 627 if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) {
@@ -642,6 +648,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
642 648
643 spin_lock_init(&ifa->lock); 649 spin_lock_init(&ifa->lock);
644 init_timer(&ifa->timer); 650 init_timer(&ifa->timer);
651 INIT_HLIST_NODE(&ifa->addr_lst);
645 ifa->timer.data = (unsigned long) ifa; 652 ifa->timer.data = (unsigned long) ifa;
646 ifa->scope = scope; 653 ifa->scope = scope;
647 ifa->prefix_len = pfxlen; 654 ifa->prefix_len = pfxlen;
@@ -668,10 +675,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
668 /* Add to big hash table */ 675 /* Add to big hash table */
669 hash = ipv6_addr_hash(addr); 676 hash = ipv6_addr_hash(addr);
670 677
671 ifa->lst_next = inet6_addr_lst[hash]; 678 hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
672 inet6_addr_lst[hash] = ifa; 679 spin_unlock(&addrconf_hash_lock);
673 in6_ifa_hold(ifa);
674 write_unlock(&addrconf_hash_lock);
675 680
676 write_lock(&idev->lock); 681 write_lock(&idev->lock);
677 /* Add to inet6_dev unicast addr list. */ 682 /* Add to inet6_dev unicast addr list. */
@@ -679,8 +684,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
679 684
680#ifdef CONFIG_IPV6_PRIVACY 685#ifdef CONFIG_IPV6_PRIVACY
681 if (ifa->flags&IFA_F_TEMPORARY) { 686 if (ifa->flags&IFA_F_TEMPORARY) {
682 ifa->tmp_next = idev->tempaddr_list; 687 list_add(&ifa->tmp_list, &idev->tempaddr_list);
683 idev->tempaddr_list = ifa;
684 in6_ifa_hold(ifa); 688 in6_ifa_hold(ifa);
685 } 689 }
686#endif 690#endif
@@ -699,7 +703,7 @@ out2:
699 703
700 return ifa; 704 return ifa;
701out: 705out:
702 write_unlock(&addrconf_hash_lock); 706 spin_unlock(&addrconf_hash_lock);
703 goto out2; 707 goto out2;
704} 708}
705 709
@@ -707,7 +711,7 @@ out:
707 711
708static void ipv6_del_addr(struct inet6_ifaddr *ifp) 712static void ipv6_del_addr(struct inet6_ifaddr *ifp)
709{ 713{
710 struct inet6_ifaddr *ifa, **ifap; 714 struct inet6_ifaddr *ifa, *ifn;
711 struct inet6_dev *idev = ifp->idev; 715 struct inet6_dev *idev = ifp->idev;
712 int hash; 716 int hash;
713 int deleted = 0, onlink = 0; 717 int deleted = 0, onlink = 0;
@@ -717,42 +721,27 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
717 721
718 ifp->dead = 1; 722 ifp->dead = 1;
719 723
720 write_lock_bh(&addrconf_hash_lock); 724 spin_lock_bh(&addrconf_hash_lock);
721 for (ifap = &inet6_addr_lst[hash]; (ifa=*ifap) != NULL; 725 hlist_del_init_rcu(&ifp->addr_lst);
722 ifap = &ifa->lst_next) { 726 spin_unlock_bh(&addrconf_hash_lock);
723 if (ifa == ifp) {
724 *ifap = ifa->lst_next;
725 __in6_ifa_put(ifp);
726 ifa->lst_next = NULL;
727 break;
728 }
729 }
730 write_unlock_bh(&addrconf_hash_lock);
731 727
732 write_lock_bh(&idev->lock); 728 write_lock_bh(&idev->lock);
733#ifdef CONFIG_IPV6_PRIVACY 729#ifdef CONFIG_IPV6_PRIVACY
734 if (ifp->flags&IFA_F_TEMPORARY) { 730 if (ifp->flags&IFA_F_TEMPORARY) {
735 for (ifap = &idev->tempaddr_list; (ifa=*ifap) != NULL; 731 list_del(&ifp->tmp_list);
736 ifap = &ifa->tmp_next) { 732 if (ifp->ifpub) {
737 if (ifa == ifp) { 733 in6_ifa_put(ifp->ifpub);
738 *ifap = ifa->tmp_next; 734 ifp->ifpub = NULL;
739 if (ifp->ifpub) {
740 in6_ifa_put(ifp->ifpub);
741 ifp->ifpub = NULL;
742 }
743 __in6_ifa_put(ifp);
744 ifa->tmp_next = NULL;
745 break;
746 }
747 } 735 }
736 __in6_ifa_put(ifp);
748 } 737 }
749#endif 738#endif
750 739
751 for (ifap = &idev->addr_list; (ifa=*ifap) != NULL;) { 740 list_for_each_entry_safe(ifa, ifn, &idev->addr_list, if_list) {
752 if (ifa == ifp) { 741 if (ifa == ifp) {
753 *ifap = ifa->if_next; 742 list_del_init(&ifp->if_list);
754 __in6_ifa_put(ifp); 743 __in6_ifa_put(ifp);
755 ifa->if_next = NULL; 744
756 if (!(ifp->flags & IFA_F_PERMANENT) || onlink > 0) 745 if (!(ifp->flags & IFA_F_PERMANENT) || onlink > 0)
757 break; 746 break;
758 deleted = 1; 747 deleted = 1;
@@ -785,7 +774,6 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
785 } 774 }
786 } 775 }
787 } 776 }
788 ifap = &ifa->if_next;
789 } 777 }
790 write_unlock_bh(&idev->lock); 778 write_unlock_bh(&idev->lock);
791 779
@@ -1164,7 +1152,7 @@ int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
1164 continue; 1152 continue;
1165 1153
1166 read_lock_bh(&idev->lock); 1154 read_lock_bh(&idev->lock);
1167 for (score->ifa = idev->addr_list; score->ifa; score->ifa = score->ifa->if_next) { 1155 list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
1168 int i; 1156 int i;
1169 1157
1170 /* 1158 /*
@@ -1242,7 +1230,6 @@ try_nextdev:
1242 in6_ifa_put(hiscore->ifa); 1230 in6_ifa_put(hiscore->ifa);
1243 return 0; 1231 return 0;
1244} 1232}
1245
1246EXPORT_SYMBOL(ipv6_dev_get_saddr); 1233EXPORT_SYMBOL(ipv6_dev_get_saddr);
1247 1234
1248int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, 1235int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
@@ -1252,12 +1239,14 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1252 int err = -EADDRNOTAVAIL; 1239 int err = -EADDRNOTAVAIL;
1253 1240
1254 rcu_read_lock(); 1241 rcu_read_lock();
1255 if ((idev = __in6_dev_get(dev)) != NULL) { 1242 idev = __in6_dev_get(dev);
1243 if (idev) {
1256 struct inet6_ifaddr *ifp; 1244 struct inet6_ifaddr *ifp;
1257 1245
1258 read_lock_bh(&idev->lock); 1246 read_lock_bh(&idev->lock);
1259 for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) { 1247 list_for_each_entry(ifp, &idev->addr_list, if_list) {
1260 if (ifp->scope == IFA_LINK && !(ifp->flags & banned_flags)) { 1248 if (ifp->scope == IFA_LINK &&
1249 !(ifp->flags & banned_flags)) {
1261 ipv6_addr_copy(addr, &ifp->addr); 1250 ipv6_addr_copy(addr, &ifp->addr);
1262 err = 0; 1251 err = 0;
1263 break; 1252 break;
@@ -1275,7 +1264,7 @@ static int ipv6_count_addresses(struct inet6_dev *idev)
1275 struct inet6_ifaddr *ifp; 1264 struct inet6_ifaddr *ifp;
1276 1265
1277 read_lock_bh(&idev->lock); 1266 read_lock_bh(&idev->lock);
1278 for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) 1267 list_for_each_entry(ifp, &idev->addr_list, if_list)
1279 cnt++; 1268 cnt++;
1280 read_unlock_bh(&idev->lock); 1269 read_unlock_bh(&idev->lock);
1281 return cnt; 1270 return cnt;
@@ -1284,11 +1273,12 @@ static int ipv6_count_addresses(struct inet6_dev *idev)
1284int ipv6_chk_addr(struct net *net, struct in6_addr *addr, 1273int ipv6_chk_addr(struct net *net, struct in6_addr *addr,
1285 struct net_device *dev, int strict) 1274 struct net_device *dev, int strict)
1286{ 1275{
1287 struct inet6_ifaddr * ifp; 1276 struct inet6_ifaddr *ifp = NULL;
1288 u8 hash = ipv6_addr_hash(addr); 1277 struct hlist_node *node;
1278 unsigned int hash = ipv6_addr_hash(addr);
1289 1279
1290 read_lock_bh(&addrconf_hash_lock); 1280 rcu_read_lock_bh();
1291 for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { 1281 hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
1292 if (!net_eq(dev_net(ifp->idev->dev), net)) 1282 if (!net_eq(dev_net(ifp->idev->dev), net))
1293 continue; 1283 continue;
1294 if (ipv6_addr_equal(&ifp->addr, addr) && 1284 if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -1298,27 +1288,28 @@ int ipv6_chk_addr(struct net *net, struct in6_addr *addr,
1298 break; 1288 break;
1299 } 1289 }
1300 } 1290 }
1301 read_unlock_bh(&addrconf_hash_lock); 1291 rcu_read_unlock_bh();
1292
1302 return ifp != NULL; 1293 return ifp != NULL;
1303} 1294}
1304EXPORT_SYMBOL(ipv6_chk_addr); 1295EXPORT_SYMBOL(ipv6_chk_addr);
1305 1296
1306static 1297static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1307int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, 1298 struct net_device *dev)
1308 struct net_device *dev)
1309{ 1299{
1310 struct inet6_ifaddr * ifp; 1300 unsigned int hash = ipv6_addr_hash(addr);
1311 u8 hash = ipv6_addr_hash(addr); 1301 struct inet6_ifaddr *ifp;
1302 struct hlist_node *node;
1312 1303
1313 for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { 1304 hlist_for_each_entry(ifp, node, &inet6_addr_lst[hash], addr_lst) {
1314 if (!net_eq(dev_net(ifp->idev->dev), net)) 1305 if (!net_eq(dev_net(ifp->idev->dev), net))
1315 continue; 1306 continue;
1316 if (ipv6_addr_equal(&ifp->addr, addr)) { 1307 if (ipv6_addr_equal(&ifp->addr, addr)) {
1317 if (dev == NULL || ifp->idev->dev == dev) 1308 if (dev == NULL || ifp->idev->dev == dev)
1318 break; 1309 return true;
1319 } 1310 }
1320 } 1311 }
1321 return ifp != NULL; 1312 return false;
1322} 1313}
1323 1314
1324int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev) 1315int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev)
@@ -1332,7 +1323,7 @@ int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev)
1332 idev = __in6_dev_get(dev); 1323 idev = __in6_dev_get(dev);
1333 if (idev) { 1324 if (idev) {
1334 read_lock_bh(&idev->lock); 1325 read_lock_bh(&idev->lock);
1335 for (ifa = idev->addr_list; ifa; ifa = ifa->if_next) { 1326 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1336 onlink = ipv6_prefix_equal(addr, &ifa->addr, 1327 onlink = ipv6_prefix_equal(addr, &ifa->addr,
1337 ifa->prefix_len); 1328 ifa->prefix_len);
1338 if (onlink) 1329 if (onlink)
@@ -1349,24 +1340,26 @@ EXPORT_SYMBOL(ipv6_chk_prefix);
1349struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, 1340struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
1350 struct net_device *dev, int strict) 1341 struct net_device *dev, int strict)
1351{ 1342{
1352 struct inet6_ifaddr * ifp; 1343 struct inet6_ifaddr *ifp, *result = NULL;
1353 u8 hash = ipv6_addr_hash(addr); 1344 unsigned int hash = ipv6_addr_hash(addr);
1345 struct hlist_node *node;
1354 1346
1355 read_lock_bh(&addrconf_hash_lock); 1347 rcu_read_lock_bh();
1356 for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { 1348 hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
1357 if (!net_eq(dev_net(ifp->idev->dev), net)) 1349 if (!net_eq(dev_net(ifp->idev->dev), net))
1358 continue; 1350 continue;
1359 if (ipv6_addr_equal(&ifp->addr, addr)) { 1351 if (ipv6_addr_equal(&ifp->addr, addr)) {
1360 if (dev == NULL || ifp->idev->dev == dev || 1352 if (dev == NULL || ifp->idev->dev == dev ||
1361 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) { 1353 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
1354 result = ifp;
1362 in6_ifa_hold(ifp); 1355 in6_ifa_hold(ifp);
1363 break; 1356 break;
1364 } 1357 }
1365 } 1358 }
1366 } 1359 }
1367 read_unlock_bh(&addrconf_hash_lock); 1360 rcu_read_unlock_bh();
1368 1361
1369 return ifp; 1362 return result;
1370} 1363}
1371 1364
1372/* Gets referenced address, destroys ifaddr */ 1365/* Gets referenced address, destroys ifaddr */
@@ -1569,7 +1562,7 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
1569 struct inet6_ifaddr *ifp; 1562 struct inet6_ifaddr *ifp;
1570 1563
1571 read_lock_bh(&idev->lock); 1564 read_lock_bh(&idev->lock);
1572 for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) { 1565 list_for_each_entry(ifp, &idev->addr_list, if_list) {
1573 if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) { 1566 if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
1574 memcpy(eui, ifp->addr.s6_addr+8, 8); 1567 memcpy(eui, ifp->addr.s6_addr+8, 8);
1575 err = 0; 1568 err = 0;
@@ -1737,7 +1730,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
1737 1730
1738 ASSERT_RTNL(); 1731 ASSERT_RTNL();
1739 1732
1740 if ((idev = ipv6_find_idev(dev)) == NULL) 1733 idev = ipv6_find_idev(dev);
1734 if (!idev)
1741 return NULL; 1735 return NULL;
1742 1736
1743 /* Add default multicast route */ 1737 /* Add default multicast route */
@@ -1970,7 +1964,7 @@ ok:
1970#ifdef CONFIG_IPV6_PRIVACY 1964#ifdef CONFIG_IPV6_PRIVACY
1971 read_lock_bh(&in6_dev->lock); 1965 read_lock_bh(&in6_dev->lock);
1972 /* update all temporary addresses in the list */ 1966 /* update all temporary addresses in the list */
1973 for (ift=in6_dev->tempaddr_list; ift; ift=ift->tmp_next) { 1967 list_for_each_entry(ift, &in6_dev->tempaddr_list, tmp_list) {
1974 /* 1968 /*
1975 * When adjusting the lifetimes of an existing 1969 * When adjusting the lifetimes of an existing
1976 * temporary address, only lower the lifetimes. 1970 * temporary address, only lower the lifetimes.
@@ -2173,7 +2167,7 @@ static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx,
2173 return -ENXIO; 2167 return -ENXIO;
2174 2168
2175 read_lock_bh(&idev->lock); 2169 read_lock_bh(&idev->lock);
2176 for (ifp = idev->addr_list; ifp; ifp=ifp->if_next) { 2170 list_for_each_entry(ifp, &idev->addr_list, if_list) {
2177 if (ifp->prefix_len == plen && 2171 if (ifp->prefix_len == plen &&
2178 ipv6_addr_equal(pfx, &ifp->addr)) { 2172 ipv6_addr_equal(pfx, &ifp->addr)) {
2179 in6_ifa_hold(ifp); 2173 in6_ifa_hold(ifp);
@@ -2184,7 +2178,7 @@ static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx,
2184 /* If the last address is deleted administratively, 2178 /* If the last address is deleted administratively,
2185 disable IPv6 on this interface. 2179 disable IPv6 on this interface.
2186 */ 2180 */
2187 if (idev->addr_list == NULL) 2181 if (list_empty(&idev->addr_list))
2188 addrconf_ifdown(idev->dev, 1); 2182 addrconf_ifdown(idev->dev, 1);
2189 return 0; 2183 return 0;
2190 } 2184 }
@@ -2445,7 +2439,8 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
2445 2439
2446 ASSERT_RTNL(); 2440 ASSERT_RTNL();
2447 2441
2448 if ((idev = addrconf_add_dev(dev)) == NULL) { 2442 idev = addrconf_add_dev(dev);
2443 if (!idev) {
2449 printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n"); 2444 printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n");
2450 return; 2445 return;
2451 } 2446 }
@@ -2460,7 +2455,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2460 int run_pending = 0; 2455 int run_pending = 0;
2461 int err; 2456 int err;
2462 2457
2463 switch(event) { 2458 switch (event) {
2464 case NETDEV_REGISTER: 2459 case NETDEV_REGISTER:
2465 if (!idev && dev->mtu >= IPV6_MIN_MTU) { 2460 if (!idev && dev->mtu >= IPV6_MIN_MTU) {
2466 idev = ipv6_add_dev(dev); 2461 idev = ipv6_add_dev(dev);
@@ -2468,6 +2463,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2468 return notifier_from_errno(-ENOMEM); 2463 return notifier_from_errno(-ENOMEM);
2469 } 2464 }
2470 break; 2465 break;
2466
2471 case NETDEV_UP: 2467 case NETDEV_UP:
2472 case NETDEV_CHANGE: 2468 case NETDEV_CHANGE:
2473 if (dev->flags & IFF_SLAVE) 2469 if (dev->flags & IFF_SLAVE)
@@ -2497,10 +2493,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2497 } 2493 }
2498 2494
2499 if (idev) { 2495 if (idev) {
2500 if (idev->if_flags & IF_READY) { 2496 if (idev->if_flags & IF_READY)
2501 /* device is already configured. */ 2497 /* device is already configured. */
2502 break; 2498 break;
2503 }
2504 idev->if_flags |= IF_READY; 2499 idev->if_flags |= IF_READY;
2505 } 2500 }
2506 2501
@@ -2512,7 +2507,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2512 run_pending = 1; 2507 run_pending = 1;
2513 } 2508 }
2514 2509
2515 switch(dev->type) { 2510 switch (dev->type) {
2516#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 2511#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
2517 case ARPHRD_SIT: 2512 case ARPHRD_SIT:
2518 addrconf_sit_config(dev); 2513 addrconf_sit_config(dev);
@@ -2529,25 +2524,30 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2529 addrconf_dev_config(dev); 2524 addrconf_dev_config(dev);
2530 break; 2525 break;
2531 } 2526 }
2527
2532 if (idev) { 2528 if (idev) {
2533 if (run_pending) 2529 if (run_pending)
2534 addrconf_dad_run(idev); 2530 addrconf_dad_run(idev);
2535 2531
2536 /* If the MTU changed during the interface down, when the 2532 /*
2537 interface up, the changed MTU must be reflected in the 2533 * If the MTU changed during the interface down,
2538 idev as well as routers. 2534 * when the interface up, the changed MTU must be
2535 * reflected in the idev as well as routers.
2539 */ 2536 */
2540 if (idev->cnf.mtu6 != dev->mtu && dev->mtu >= IPV6_MIN_MTU) { 2537 if (idev->cnf.mtu6 != dev->mtu &&
2538 dev->mtu >= IPV6_MIN_MTU) {
2541 rt6_mtu_change(dev, dev->mtu); 2539 rt6_mtu_change(dev, dev->mtu);
2542 idev->cnf.mtu6 = dev->mtu; 2540 idev->cnf.mtu6 = dev->mtu;
2543 } 2541 }
2544 idev->tstamp = jiffies; 2542 idev->tstamp = jiffies;
2545 inet6_ifinfo_notify(RTM_NEWLINK, idev); 2543 inet6_ifinfo_notify(RTM_NEWLINK, idev);
2546 /* If the changed mtu during down is lower than IPV6_MIN_MTU 2544
2547 stop IPv6 on this interface. 2545 /*
2546 * If the changed mtu during down is lower than
2547 * IPV6_MIN_MTU stop IPv6 on this interface.
2548 */ 2548 */
2549 if (dev->mtu < IPV6_MIN_MTU) 2549 if (dev->mtu < IPV6_MIN_MTU)
2550 addrconf_ifdown(dev, event != NETDEV_DOWN); 2550 addrconf_ifdown(dev, 1);
2551 } 2551 }
2552 break; 2552 break;
2553 2553
@@ -2564,7 +2564,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2564 break; 2564 break;
2565 } 2565 }
2566 2566
2567 /* MTU falled under IPV6_MIN_MTU. Stop IPv6 on this interface. */ 2567 /*
2568 * MTU falled under IPV6_MIN_MTU.
2569 * Stop IPv6 on this interface.
2570 */
2568 2571
2569 case NETDEV_DOWN: 2572 case NETDEV_DOWN:
2570 case NETDEV_UNREGISTER: 2573 case NETDEV_UNREGISTER:
@@ -2584,9 +2587,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2584 return notifier_from_errno(err); 2587 return notifier_from_errno(err);
2585 } 2588 }
2586 break; 2589 break;
2587 case NETDEV_BONDING_OLDTYPE: 2590
2588 case NETDEV_BONDING_NEWTYPE: 2591 case NETDEV_PRE_TYPE_CHANGE:
2589 addrconf_bonding_change(dev, event); 2592 case NETDEV_POST_TYPE_CHANGE:
2593 addrconf_type_change(dev, event);
2590 break; 2594 break;
2591 } 2595 }
2592 2596
@@ -2598,28 +2602,27 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2598 */ 2602 */
2599static struct notifier_block ipv6_dev_notf = { 2603static struct notifier_block ipv6_dev_notf = {
2600 .notifier_call = addrconf_notify, 2604 .notifier_call = addrconf_notify,
2601 .priority = 0
2602}; 2605};
2603 2606
2604static void addrconf_bonding_change(struct net_device *dev, unsigned long event) 2607static void addrconf_type_change(struct net_device *dev, unsigned long event)
2605{ 2608{
2606 struct inet6_dev *idev; 2609 struct inet6_dev *idev;
2607 ASSERT_RTNL(); 2610 ASSERT_RTNL();
2608 2611
2609 idev = __in6_dev_get(dev); 2612 idev = __in6_dev_get(dev);
2610 2613
2611 if (event == NETDEV_BONDING_NEWTYPE) 2614 if (event == NETDEV_POST_TYPE_CHANGE)
2612 ipv6_mc_remap(idev); 2615 ipv6_mc_remap(idev);
2613 else if (event == NETDEV_BONDING_OLDTYPE) 2616 else if (event == NETDEV_PRE_TYPE_CHANGE)
2614 ipv6_mc_unmap(idev); 2617 ipv6_mc_unmap(idev);
2615} 2618}
2616 2619
2617static int addrconf_ifdown(struct net_device *dev, int how) 2620static int addrconf_ifdown(struct net_device *dev, int how)
2618{ 2621{
2619 struct inet6_dev *idev;
2620 struct inet6_ifaddr *ifa, *keep_list, **bifa;
2621 struct net *net = dev_net(dev); 2622 struct net *net = dev_net(dev);
2622 int i; 2623 struct inet6_dev *idev;
2624 struct inet6_ifaddr *ifa;
2625 LIST_HEAD(keep_list);
2623 2626
2624 ASSERT_RTNL(); 2627 ASSERT_RTNL();
2625 2628
@@ -2630,8 +2633,9 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2630 if (idev == NULL) 2633 if (idev == NULL)
2631 return -ENODEV; 2634 return -ENODEV;
2632 2635
2633 /* Step 1: remove reference to ipv6 device from parent device. 2636 /*
2634 Do not dev_put! 2637 * Step 1: remove reference to ipv6 device from parent device.
2638 * Do not dev_put!
2635 */ 2639 */
2636 if (how) { 2640 if (how) {
2637 idev->dead = 1; 2641 idev->dead = 1;
@@ -2644,40 +2648,21 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2644 2648
2645 } 2649 }
2646 2650
2647 /* Step 2: clear hash table */
2648 for (i=0; i<IN6_ADDR_HSIZE; i++) {
2649 bifa = &inet6_addr_lst[i];
2650
2651 write_lock_bh(&addrconf_hash_lock);
2652 while ((ifa = *bifa) != NULL) {
2653 if (ifa->idev == idev &&
2654 (how || !(ifa->flags&IFA_F_PERMANENT) ||
2655 ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
2656 *bifa = ifa->lst_next;
2657 ifa->lst_next = NULL;
2658 __in6_ifa_put(ifa);
2659 continue;
2660 }
2661 bifa = &ifa->lst_next;
2662 }
2663 write_unlock_bh(&addrconf_hash_lock);
2664 }
2665
2666 write_lock_bh(&idev->lock); 2651 write_lock_bh(&idev->lock);
2667 2652
2668 /* Step 3: clear flags for stateless addrconf */ 2653 /* Step 2: clear flags for stateless addrconf */
2669 if (!how) 2654 if (!how)
2670 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); 2655 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
2671 2656
2672 /* Step 4: clear address list */
2673#ifdef CONFIG_IPV6_PRIVACY 2657#ifdef CONFIG_IPV6_PRIVACY
2674 if (how && del_timer(&idev->regen_timer)) 2658 if (how && del_timer(&idev->regen_timer))
2675 in6_dev_put(idev); 2659 in6_dev_put(idev);
2676 2660
2677 /* clear tempaddr list */ 2661 /* Step 3: clear tempaddr list */
2678 while ((ifa = idev->tempaddr_list) != NULL) { 2662 while (!list_empty(&idev->tempaddr_list)) {
2679 idev->tempaddr_list = ifa->tmp_next; 2663 ifa = list_first_entry(&idev->tempaddr_list,
2680 ifa->tmp_next = NULL; 2664 struct inet6_ifaddr, tmp_list);
2665 list_del(&ifa->tmp_list);
2681 ifa->dead = 1; 2666 ifa->dead = 1;
2682 write_unlock_bh(&idev->lock); 2667 write_unlock_bh(&idev->lock);
2683 spin_lock_bh(&ifa->lock); 2668 spin_lock_bh(&ifa->lock);
@@ -2691,23 +2676,18 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2691 write_lock_bh(&idev->lock); 2676 write_lock_bh(&idev->lock);
2692 } 2677 }
2693#endif 2678#endif
2694 keep_list = NULL;
2695 bifa = &keep_list;
2696 while ((ifa = idev->addr_list) != NULL) {
2697 idev->addr_list = ifa->if_next;
2698 ifa->if_next = NULL;
2699 2679
2680 while (!list_empty(&idev->addr_list)) {
2681 ifa = list_first_entry(&idev->addr_list,
2682 struct inet6_ifaddr, if_list);
2700 addrconf_del_timer(ifa); 2683 addrconf_del_timer(ifa);
2701 2684
2702 /* If just doing link down, and address is permanent 2685 /* If just doing link down, and address is permanent
2703 and not link-local, then retain it. */ 2686 and not link-local, then retain it. */
2704 if (how == 0 && 2687 if (!how &&
2705 (ifa->flags&IFA_F_PERMANENT) && 2688 (ifa->flags&IFA_F_PERMANENT) &&
2706 !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) { 2689 !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
2707 2690 list_move_tail(&ifa->if_list, &keep_list);
2708 /* Move to holding list */
2709 *bifa = ifa;
2710 bifa = &ifa->if_next;
2711 2691
2712 /* If not doing DAD on this address, just keep it. */ 2692 /* If not doing DAD on this address, just keep it. */
2713 if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) || 2693 if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
@@ -2722,24 +2702,32 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2722 /* Flag it for later restoration when link comes up */ 2702 /* Flag it for later restoration when link comes up */
2723 ifa->flags |= IFA_F_TENTATIVE; 2703 ifa->flags |= IFA_F_TENTATIVE;
2724 in6_ifa_hold(ifa); 2704 in6_ifa_hold(ifa);
2705 write_unlock_bh(&idev->lock);
2725 } else { 2706 } else {
2707 list_del(&ifa->if_list);
2726 ifa->dead = 1; 2708 ifa->dead = 1;
2709 write_unlock_bh(&idev->lock);
2710
2711 /* clear hash table */
2712 spin_lock_bh(&addrconf_hash_lock);
2713 hlist_del_init_rcu(&ifa->addr_lst);
2714 spin_unlock_bh(&addrconf_hash_lock);
2727 } 2715 }
2728 write_unlock_bh(&idev->lock);
2729 2716
2730 __ipv6_ifa_notify(RTM_DELADDR, ifa); 2717 __ipv6_ifa_notify(RTM_DELADDR, ifa);
2731 atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa); 2718 if (ifa->dead)
2719 atomic_notifier_call_chain(&inet6addr_chain,
2720 NETDEV_DOWN, ifa);
2732 in6_ifa_put(ifa); 2721 in6_ifa_put(ifa);
2733 2722
2734 write_lock_bh(&idev->lock); 2723 write_lock_bh(&idev->lock);
2735 } 2724 }
2736 2725
2737 idev->addr_list = keep_list; 2726 list_splice(&keep_list, &idev->addr_list);
2738 2727
2739 write_unlock_bh(&idev->lock); 2728 write_unlock_bh(&idev->lock);
2740 2729
2741 /* Step 5: Discard multicast list */ 2730 /* Step 5: Discard multicast list */
2742
2743 if (how) 2731 if (how)
2744 ipv6_mc_destroy_dev(idev); 2732 ipv6_mc_destroy_dev(idev);
2745 else 2733 else
@@ -2747,8 +2735,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2747 2735
2748 idev->tstamp = jiffies; 2736 idev->tstamp = jiffies;
2749 2737
2750 /* Shot the device (if unregistered) */ 2738 /* Last: Shot the device (if unregistered) */
2751
2752 if (how) { 2739 if (how) {
2753 addrconf_sysctl_unregister(idev); 2740 addrconf_sysctl_unregister(idev);
2754 neigh_parms_release(&nd_tbl, idev->nd_parms); 2741 neigh_parms_release(&nd_tbl, idev->nd_parms);
@@ -2859,7 +2846,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
2859 * Optimistic nodes can start receiving 2846 * Optimistic nodes can start receiving
2860 * Frames right away 2847 * Frames right away
2861 */ 2848 */
2862 if(ifp->flags & IFA_F_OPTIMISTIC) 2849 if (ifp->flags & IFA_F_OPTIMISTIC)
2863 ip6_ins_rt(ifp->rt); 2850 ip6_ins_rt(ifp->rt);
2864 2851
2865 addrconf_dad_kick(ifp); 2852 addrconf_dad_kick(ifp);
@@ -2909,7 +2896,7 @@ out:
2909 2896
2910static void addrconf_dad_completed(struct inet6_ifaddr *ifp) 2897static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
2911{ 2898{
2912 struct net_device * dev = ifp->idev->dev; 2899 struct net_device *dev = ifp->idev->dev;
2913 2900
2914 /* 2901 /*
2915 * Configure the address for reception. Now it is valid. 2902 * Configure the address for reception. Now it is valid.
@@ -2940,11 +2927,12 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
2940 } 2927 }
2941} 2928}
2942 2929
2943static void addrconf_dad_run(struct inet6_dev *idev) { 2930static void addrconf_dad_run(struct inet6_dev *idev)
2931{
2944 struct inet6_ifaddr *ifp; 2932 struct inet6_ifaddr *ifp;
2945 2933
2946 read_lock_bh(&idev->lock); 2934 read_lock_bh(&idev->lock);
2947 for (ifp = idev->addr_list; ifp; ifp = ifp->if_next) { 2935 list_for_each_entry(ifp, &idev->addr_list, if_list) {
2948 spin_lock(&ifp->lock); 2936 spin_lock(&ifp->lock);
2949 if (!(ifp->flags & IFA_F_TENTATIVE)) { 2937 if (!(ifp->flags & IFA_F_TENTATIVE)) {
2950 spin_unlock(&ifp->lock); 2938 spin_unlock(&ifp->lock);
@@ -2969,36 +2957,35 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq)
2969 struct net *net = seq_file_net(seq); 2957 struct net *net = seq_file_net(seq);
2970 2958
2971 for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { 2959 for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
2972 ifa = inet6_addr_lst[state->bucket]; 2960 struct hlist_node *n;
2973 2961 hlist_for_each_entry_rcu(ifa, n, &inet6_addr_lst[state->bucket],
2974 while (ifa && !net_eq(dev_net(ifa->idev->dev), net)) 2962 addr_lst)
2975 ifa = ifa->lst_next; 2963 if (net_eq(dev_net(ifa->idev->dev), net))
2976 if (ifa) 2964 return ifa;
2977 break;
2978 } 2965 }
2979 return ifa; 2966 return NULL;
2980} 2967}
2981 2968
2982static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, struct inet6_ifaddr *ifa) 2969static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
2970 struct inet6_ifaddr *ifa)
2983{ 2971{
2984 struct if6_iter_state *state = seq->private; 2972 struct if6_iter_state *state = seq->private;
2985 struct net *net = seq_file_net(seq); 2973 struct net *net = seq_file_net(seq);
2974 struct hlist_node *n = &ifa->addr_lst;
2986 2975
2987 ifa = ifa->lst_next; 2976 hlist_for_each_entry_continue_rcu(ifa, n, addr_lst)
2988try_again: 2977 if (net_eq(dev_net(ifa->idev->dev), net))
2989 if (ifa) { 2978 return ifa;
2990 if (!net_eq(dev_net(ifa->idev->dev), net)) {
2991 ifa = ifa->lst_next;
2992 goto try_again;
2993 }
2994 }
2995 2979
2996 if (!ifa && ++state->bucket < IN6_ADDR_HSIZE) { 2980 while (++state->bucket < IN6_ADDR_HSIZE) {
2997 ifa = inet6_addr_lst[state->bucket]; 2981 hlist_for_each_entry(ifa, n,
2998 goto try_again; 2982 &inet6_addr_lst[state->bucket], addr_lst) {
2983 if (net_eq(dev_net(ifa->idev->dev), net))
2984 return ifa;
2985 }
2999 } 2986 }
3000 2987
3001 return ifa; 2988 return NULL;
3002} 2989}
3003 2990
3004static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos) 2991static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos)
@@ -3006,15 +2993,15 @@ static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos)
3006 struct inet6_ifaddr *ifa = if6_get_first(seq); 2993 struct inet6_ifaddr *ifa = if6_get_first(seq);
3007 2994
3008 if (ifa) 2995 if (ifa)
3009 while(pos && (ifa = if6_get_next(seq, ifa)) != NULL) 2996 while (pos && (ifa = if6_get_next(seq, ifa)) != NULL)
3010 --pos; 2997 --pos;
3011 return pos ? NULL : ifa; 2998 return pos ? NULL : ifa;
3012} 2999}
3013 3000
3014static void *if6_seq_start(struct seq_file *seq, loff_t *pos) 3001static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
3015 __acquires(addrconf_hash_lock) 3002 __acquires(rcu)
3016{ 3003{
3017 read_lock_bh(&addrconf_hash_lock); 3004 rcu_read_lock_bh();
3018 return if6_get_idx(seq, *pos); 3005 return if6_get_idx(seq, *pos);
3019} 3006}
3020 3007
@@ -3028,9 +3015,9 @@ static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3028} 3015}
3029 3016
3030static void if6_seq_stop(struct seq_file *seq, void *v) 3017static void if6_seq_stop(struct seq_file *seq, void *v)
3031 __releases(addrconf_hash_lock) 3018 __releases(rcu)
3032{ 3019{
3033 read_unlock_bh(&addrconf_hash_lock); 3020 rcu_read_unlock_bh();
3034} 3021}
3035 3022
3036static int if6_seq_show(struct seq_file *seq, void *v) 3023static int if6_seq_show(struct seq_file *seq, void *v)
@@ -3100,10 +3087,12 @@ void if6_proc_exit(void)
3100int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr) 3087int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
3101{ 3088{
3102 int ret = 0; 3089 int ret = 0;
3103 struct inet6_ifaddr * ifp; 3090 struct inet6_ifaddr *ifp = NULL;
3104 u8 hash = ipv6_addr_hash(addr); 3091 struct hlist_node *n;
3105 read_lock_bh(&addrconf_hash_lock); 3092 unsigned int hash = ipv6_addr_hash(addr);
3106 for (ifp = inet6_addr_lst[hash]; ifp; ifp = ifp->lst_next) { 3093
3094 rcu_read_lock_bh();
3095 hlist_for_each_entry_rcu(ifp, n, &inet6_addr_lst[hash], addr_lst) {
3107 if (!net_eq(dev_net(ifp->idev->dev), net)) 3096 if (!net_eq(dev_net(ifp->idev->dev), net))
3108 continue; 3097 continue;
3109 if (ipv6_addr_equal(&ifp->addr, addr) && 3098 if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -3112,7 +3101,7 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
3112 break; 3101 break;
3113 } 3102 }
3114 } 3103 }
3115 read_unlock_bh(&addrconf_hash_lock); 3104 rcu_read_unlock_bh();
3116 return ret; 3105 return ret;
3117} 3106}
3118#endif 3107#endif
@@ -3123,43 +3112,35 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
3123 3112
3124static void addrconf_verify(unsigned long foo) 3113static void addrconf_verify(unsigned long foo)
3125{ 3114{
3115 unsigned long now, next, next_sec, next_sched;
3126 struct inet6_ifaddr *ifp; 3116 struct inet6_ifaddr *ifp;
3127 unsigned long now, next; 3117 struct hlist_node *node;
3128 int i; 3118 int i;
3129 3119
3130 spin_lock_bh(&addrconf_verify_lock); 3120 rcu_read_lock_bh();
3121 spin_lock(&addrconf_verify_lock);
3131 now = jiffies; 3122 now = jiffies;
3132 next = now + ADDR_CHECK_FREQUENCY; 3123 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
3133 3124
3134 del_timer(&addr_chk_timer); 3125 del_timer(&addr_chk_timer);
3135 3126
3136 for (i=0; i < IN6_ADDR_HSIZE; i++) { 3127 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3137
3138restart: 3128restart:
3139 read_lock(&addrconf_hash_lock); 3129 hlist_for_each_entry_rcu(ifp, node,
3140 for (ifp=inet6_addr_lst[i]; ifp; ifp=ifp->lst_next) { 3130 &inet6_addr_lst[i], addr_lst) {
3141 unsigned long age; 3131 unsigned long age;
3142#ifdef CONFIG_IPV6_PRIVACY
3143 unsigned long regen_advance;
3144#endif
3145 3132
3146 if (ifp->flags & IFA_F_PERMANENT) 3133 if (ifp->flags & IFA_F_PERMANENT)
3147 continue; 3134 continue;
3148 3135
3149 spin_lock(&ifp->lock); 3136 spin_lock(&ifp->lock);
3150 age = (now - ifp->tstamp) / HZ; 3137 /* We try to batch several events at once. */
3151 3138 age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
3152#ifdef CONFIG_IPV6_PRIVACY
3153 regen_advance = ifp->idev->cnf.regen_max_retry *
3154 ifp->idev->cnf.dad_transmits *
3155 ifp->idev->nd_parms->retrans_time / HZ;
3156#endif
3157 3139
3158 if (ifp->valid_lft != INFINITY_LIFE_TIME && 3140 if (ifp->valid_lft != INFINITY_LIFE_TIME &&
3159 age >= ifp->valid_lft) { 3141 age >= ifp->valid_lft) {
3160 spin_unlock(&ifp->lock); 3142 spin_unlock(&ifp->lock);
3161 in6_ifa_hold(ifp); 3143 in6_ifa_hold(ifp);
3162 read_unlock(&addrconf_hash_lock);
3163 ipv6_del_addr(ifp); 3144 ipv6_del_addr(ifp);
3164 goto restart; 3145 goto restart;
3165 } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) { 3146 } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
@@ -3181,7 +3162,6 @@ restart:
3181 3162
3182 if (deprecate) { 3163 if (deprecate) {
3183 in6_ifa_hold(ifp); 3164 in6_ifa_hold(ifp);
3184 read_unlock(&addrconf_hash_lock);
3185 3165
3186 ipv6_ifa_notify(0, ifp); 3166 ipv6_ifa_notify(0, ifp);
3187 in6_ifa_put(ifp); 3167 in6_ifa_put(ifp);
@@ -3190,6 +3170,10 @@ restart:
3190#ifdef CONFIG_IPV6_PRIVACY 3170#ifdef CONFIG_IPV6_PRIVACY
3191 } else if ((ifp->flags&IFA_F_TEMPORARY) && 3171 } else if ((ifp->flags&IFA_F_TEMPORARY) &&
3192 !(ifp->flags&IFA_F_TENTATIVE)) { 3172 !(ifp->flags&IFA_F_TENTATIVE)) {
3173 unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
3174 ifp->idev->cnf.dad_transmits *
3175 ifp->idev->nd_parms->retrans_time / HZ;
3176
3193 if (age >= ifp->prefered_lft - regen_advance) { 3177 if (age >= ifp->prefered_lft - regen_advance) {
3194 struct inet6_ifaddr *ifpub = ifp->ifpub; 3178 struct inet6_ifaddr *ifpub = ifp->ifpub;
3195 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next)) 3179 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
@@ -3199,7 +3183,7 @@ restart:
3199 in6_ifa_hold(ifp); 3183 in6_ifa_hold(ifp);
3200 in6_ifa_hold(ifpub); 3184 in6_ifa_hold(ifpub);
3201 spin_unlock(&ifp->lock); 3185 spin_unlock(&ifp->lock);
3202 read_unlock(&addrconf_hash_lock); 3186
3203 spin_lock(&ifpub->lock); 3187 spin_lock(&ifpub->lock);
3204 ifpub->regen_count = 0; 3188 ifpub->regen_count = 0;
3205 spin_unlock(&ifpub->lock); 3189 spin_unlock(&ifpub->lock);
@@ -3219,12 +3203,26 @@ restart:
3219 spin_unlock(&ifp->lock); 3203 spin_unlock(&ifp->lock);
3220 } 3204 }
3221 } 3205 }
3222 read_unlock(&addrconf_hash_lock);
3223 } 3206 }
3224 3207
3225 addr_chk_timer.expires = time_before(next, jiffies + HZ) ? jiffies + HZ : next; 3208 next_sec = round_jiffies_up(next);
3209 next_sched = next;
3210
3211 /* If rounded timeout is accurate enough, accept it. */
3212 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
3213 next_sched = next_sec;
3214
3215 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
3216 if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
3217 next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
3218
3219 ADBG((KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
3220 now, next, next_sec, next_sched));
3221
3222 addr_chk_timer.expires = next_sched;
3226 add_timer(&addr_chk_timer); 3223 add_timer(&addr_chk_timer);
3227 spin_unlock_bh(&addrconf_verify_lock); 3224 spin_unlock(&addrconf_verify_lock);
3225 rcu_read_unlock_bh();
3228} 3226}
3229 3227
3230static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local) 3228static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local)
@@ -3514,8 +3512,7 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
3514 return nlmsg_end(skb, nlh); 3512 return nlmsg_end(skb, nlh);
3515} 3513}
3516 3514
3517enum addr_type_t 3515enum addr_type_t {
3518{
3519 UNICAST_ADDR, 3516 UNICAST_ADDR,
3520 MULTICAST_ADDR, 3517 MULTICAST_ADDR,
3521 ANYCAST_ADDR, 3518 ANYCAST_ADDR,
@@ -3526,7 +3523,6 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
3526 struct netlink_callback *cb, enum addr_type_t type, 3523 struct netlink_callback *cb, enum addr_type_t type,
3527 int s_ip_idx, int *p_ip_idx) 3524 int s_ip_idx, int *p_ip_idx)
3528{ 3525{
3529 struct inet6_ifaddr *ifa;
3530 struct ifmcaddr6 *ifmca; 3526 struct ifmcaddr6 *ifmca;
3531 struct ifacaddr6 *ifaca; 3527 struct ifacaddr6 *ifaca;
3532 int err = 1; 3528 int err = 1;
@@ -3534,11 +3530,12 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
3534 3530
3535 read_lock_bh(&idev->lock); 3531 read_lock_bh(&idev->lock);
3536 switch (type) { 3532 switch (type) {
3537 case UNICAST_ADDR: 3533 case UNICAST_ADDR: {
3534 struct inet6_ifaddr *ifa;
3535
3538 /* unicast address incl. temp addr */ 3536 /* unicast address incl. temp addr */
3539 for (ifa = idev->addr_list; ifa; 3537 list_for_each_entry(ifa, &idev->addr_list, if_list) {
3540 ifa = ifa->if_next, ip_idx++) { 3538 if (++ip_idx < s_ip_idx)
3541 if (ip_idx < s_ip_idx)
3542 continue; 3539 continue;
3543 err = inet6_fill_ifaddr(skb, ifa, 3540 err = inet6_fill_ifaddr(skb, ifa,
3544 NETLINK_CB(cb->skb).pid, 3541 NETLINK_CB(cb->skb).pid,
@@ -3549,6 +3546,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
3549 break; 3546 break;
3550 } 3547 }
3551 break; 3548 break;
3549 }
3552 case MULTICAST_ADDR: 3550 case MULTICAST_ADDR:
3553 /* multicast address */ 3551 /* multicast address */
3554 for (ifmca = idev->mc_list; ifmca; 3552 for (ifmca = idev->mc_list; ifmca;
@@ -3610,10 +3608,11 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
3610 hlist_for_each_entry_rcu(dev, node, head, index_hlist) { 3608 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
3611 if (idx < s_idx) 3609 if (idx < s_idx)
3612 goto cont; 3610 goto cont;
3613 if (idx > s_idx) 3611 if (h > s_h || idx > s_idx)
3614 s_ip_idx = 0; 3612 s_ip_idx = 0;
3615 ip_idx = 0; 3613 ip_idx = 0;
3616 if ((idev = __in6_dev_get(dev)) == NULL) 3614 idev = __in6_dev_get(dev);
3615 if (!idev)
3617 goto cont; 3616 goto cont;
3618 3617
3619 if (in6_dump_addrs(idev, skb, cb, type, 3618 if (in6_dump_addrs(idev, skb, cb, type,
@@ -3680,12 +3679,14 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh,
3680 if (ifm->ifa_index) 3679 if (ifm->ifa_index)
3681 dev = __dev_get_by_index(net, ifm->ifa_index); 3680 dev = __dev_get_by_index(net, ifm->ifa_index);
3682 3681
3683 if ((ifa = ipv6_get_ifaddr(net, addr, dev, 1)) == NULL) { 3682 ifa = ipv6_get_ifaddr(net, addr, dev, 1);
3683 if (!ifa) {
3684 err = -EADDRNOTAVAIL; 3684 err = -EADDRNOTAVAIL;
3685 goto errout; 3685 goto errout;
3686 } 3686 }
3687 3687
3688 if ((skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL)) == NULL) { 3688 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
3689 if (!skb) {
3689 err = -ENOBUFS; 3690 err = -ENOBUFS;
3690 goto errout_ifa; 3691 goto errout_ifa;
3691 } 3692 }
@@ -3810,7 +3811,7 @@ static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib,
3810static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, 3811static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
3811 int bytes) 3812 int bytes)
3812{ 3813{
3813 switch(attrtype) { 3814 switch (attrtype) {
3814 case IFLA_INET6_STATS: 3815 case IFLA_INET6_STATS:
3815 __snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes); 3816 __snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes);
3816 break; 3817 break;
@@ -4046,7 +4047,8 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
4046 addrconf_leave_anycast(ifp); 4047 addrconf_leave_anycast(ifp);
4047 addrconf_leave_solict(ifp->idev, &ifp->addr); 4048 addrconf_leave_solict(ifp->idev, &ifp->addr);
4048 dst_hold(&ifp->rt->u.dst); 4049 dst_hold(&ifp->rt->u.dst);
4049 if (ip6_del_rt(ifp->rt)) 4050
4051 if (ifp->dead && ip6_del_rt(ifp->rt))
4050 dst_free(&ifp->rt->u.dst); 4052 dst_free(&ifp->rt->u.dst);
4051 break; 4053 break;
4052 } 4054 }
@@ -4162,211 +4164,211 @@ static struct addrconf_sysctl_table
4162 .sysctl_header = NULL, 4164 .sysctl_header = NULL,
4163 .addrconf_vars = { 4165 .addrconf_vars = {
4164 { 4166 {
4165 .procname = "forwarding", 4167 .procname = "forwarding",
4166 .data = &ipv6_devconf.forwarding, 4168 .data = &ipv6_devconf.forwarding,
4167 .maxlen = sizeof(int), 4169 .maxlen = sizeof(int),
4168 .mode = 0644, 4170 .mode = 0644,
4169 .proc_handler = addrconf_sysctl_forward, 4171 .proc_handler = addrconf_sysctl_forward,
4170 }, 4172 },
4171 { 4173 {
4172 .procname = "hop_limit", 4174 .procname = "hop_limit",
4173 .data = &ipv6_devconf.hop_limit, 4175 .data = &ipv6_devconf.hop_limit,
4174 .maxlen = sizeof(int), 4176 .maxlen = sizeof(int),
4175 .mode = 0644, 4177 .mode = 0644,
4176 .proc_handler = proc_dointvec, 4178 .proc_handler = proc_dointvec,
4177 }, 4179 },
4178 { 4180 {
4179 .procname = "mtu", 4181 .procname = "mtu",
4180 .data = &ipv6_devconf.mtu6, 4182 .data = &ipv6_devconf.mtu6,
4181 .maxlen = sizeof(int), 4183 .maxlen = sizeof(int),
4182 .mode = 0644, 4184 .mode = 0644,
4183 .proc_handler = proc_dointvec, 4185 .proc_handler = proc_dointvec,
4184 }, 4186 },
4185 { 4187 {
4186 .procname = "accept_ra", 4188 .procname = "accept_ra",
4187 .data = &ipv6_devconf.accept_ra, 4189 .data = &ipv6_devconf.accept_ra,
4188 .maxlen = sizeof(int), 4190 .maxlen = sizeof(int),
4189 .mode = 0644, 4191 .mode = 0644,
4190 .proc_handler = proc_dointvec, 4192 .proc_handler = proc_dointvec,
4191 }, 4193 },
4192 { 4194 {
4193 .procname = "accept_redirects", 4195 .procname = "accept_redirects",
4194 .data = &ipv6_devconf.accept_redirects, 4196 .data = &ipv6_devconf.accept_redirects,
4195 .maxlen = sizeof(int), 4197 .maxlen = sizeof(int),
4196 .mode = 0644, 4198 .mode = 0644,
4197 .proc_handler = proc_dointvec, 4199 .proc_handler = proc_dointvec,
4198 }, 4200 },
4199 { 4201 {
4200 .procname = "autoconf", 4202 .procname = "autoconf",
4201 .data = &ipv6_devconf.autoconf, 4203 .data = &ipv6_devconf.autoconf,
4202 .maxlen = sizeof(int), 4204 .maxlen = sizeof(int),
4203 .mode = 0644, 4205 .mode = 0644,
4204 .proc_handler = proc_dointvec, 4206 .proc_handler = proc_dointvec,
4205 }, 4207 },
4206 { 4208 {
4207 .procname = "dad_transmits", 4209 .procname = "dad_transmits",
4208 .data = &ipv6_devconf.dad_transmits, 4210 .data = &ipv6_devconf.dad_transmits,
4209 .maxlen = sizeof(int), 4211 .maxlen = sizeof(int),
4210 .mode = 0644, 4212 .mode = 0644,
4211 .proc_handler = proc_dointvec, 4213 .proc_handler = proc_dointvec,
4212 }, 4214 },
4213 { 4215 {
4214 .procname = "router_solicitations", 4216 .procname = "router_solicitations",
4215 .data = &ipv6_devconf.rtr_solicits, 4217 .data = &ipv6_devconf.rtr_solicits,
4216 .maxlen = sizeof(int), 4218 .maxlen = sizeof(int),
4217 .mode = 0644, 4219 .mode = 0644,
4218 .proc_handler = proc_dointvec, 4220 .proc_handler = proc_dointvec,
4219 }, 4221 },
4220 { 4222 {
4221 .procname = "router_solicitation_interval", 4223 .procname = "router_solicitation_interval",
4222 .data = &ipv6_devconf.rtr_solicit_interval, 4224 .data = &ipv6_devconf.rtr_solicit_interval,
4223 .maxlen = sizeof(int), 4225 .maxlen = sizeof(int),
4224 .mode = 0644, 4226 .mode = 0644,
4225 .proc_handler = proc_dointvec_jiffies, 4227 .proc_handler = proc_dointvec_jiffies,
4226 }, 4228 },
4227 { 4229 {
4228 .procname = "router_solicitation_delay", 4230 .procname = "router_solicitation_delay",
4229 .data = &ipv6_devconf.rtr_solicit_delay, 4231 .data = &ipv6_devconf.rtr_solicit_delay,
4230 .maxlen = sizeof(int), 4232 .maxlen = sizeof(int),
4231 .mode = 0644, 4233 .mode = 0644,
4232 .proc_handler = proc_dointvec_jiffies, 4234 .proc_handler = proc_dointvec_jiffies,
4233 }, 4235 },
4234 { 4236 {
4235 .procname = "force_mld_version", 4237 .procname = "force_mld_version",
4236 .data = &ipv6_devconf.force_mld_version, 4238 .data = &ipv6_devconf.force_mld_version,
4237 .maxlen = sizeof(int), 4239 .maxlen = sizeof(int),
4238 .mode = 0644, 4240 .mode = 0644,
4239 .proc_handler = proc_dointvec, 4241 .proc_handler = proc_dointvec,
4240 }, 4242 },
4241#ifdef CONFIG_IPV6_PRIVACY 4243#ifdef CONFIG_IPV6_PRIVACY
4242 { 4244 {
4243 .procname = "use_tempaddr", 4245 .procname = "use_tempaddr",
4244 .data = &ipv6_devconf.use_tempaddr, 4246 .data = &ipv6_devconf.use_tempaddr,
4245 .maxlen = sizeof(int), 4247 .maxlen = sizeof(int),
4246 .mode = 0644, 4248 .mode = 0644,
4247 .proc_handler = proc_dointvec, 4249 .proc_handler = proc_dointvec,
4248 }, 4250 },
4249 { 4251 {
4250 .procname = "temp_valid_lft", 4252 .procname = "temp_valid_lft",
4251 .data = &ipv6_devconf.temp_valid_lft, 4253 .data = &ipv6_devconf.temp_valid_lft,
4252 .maxlen = sizeof(int), 4254 .maxlen = sizeof(int),
4253 .mode = 0644, 4255 .mode = 0644,
4254 .proc_handler = proc_dointvec, 4256 .proc_handler = proc_dointvec,
4255 }, 4257 },
4256 { 4258 {
4257 .procname = "temp_prefered_lft", 4259 .procname = "temp_prefered_lft",
4258 .data = &ipv6_devconf.temp_prefered_lft, 4260 .data = &ipv6_devconf.temp_prefered_lft,
4259 .maxlen = sizeof(int), 4261 .maxlen = sizeof(int),
4260 .mode = 0644, 4262 .mode = 0644,
4261 .proc_handler = proc_dointvec, 4263 .proc_handler = proc_dointvec,
4262 }, 4264 },
4263 { 4265 {
4264 .procname = "regen_max_retry", 4266 .procname = "regen_max_retry",
4265 .data = &ipv6_devconf.regen_max_retry, 4267 .data = &ipv6_devconf.regen_max_retry,
4266 .maxlen = sizeof(int), 4268 .maxlen = sizeof(int),
4267 .mode = 0644, 4269 .mode = 0644,
4268 .proc_handler = proc_dointvec, 4270 .proc_handler = proc_dointvec,
4269 }, 4271 },
4270 { 4272 {
4271 .procname = "max_desync_factor", 4273 .procname = "max_desync_factor",
4272 .data = &ipv6_devconf.max_desync_factor, 4274 .data = &ipv6_devconf.max_desync_factor,
4273 .maxlen = sizeof(int), 4275 .maxlen = sizeof(int),
4274 .mode = 0644, 4276 .mode = 0644,
4275 .proc_handler = proc_dointvec, 4277 .proc_handler = proc_dointvec,
4276 }, 4278 },
4277#endif 4279#endif
4278 { 4280 {
4279 .procname = "max_addresses", 4281 .procname = "max_addresses",
4280 .data = &ipv6_devconf.max_addresses, 4282 .data = &ipv6_devconf.max_addresses,
4281 .maxlen = sizeof(int), 4283 .maxlen = sizeof(int),
4282 .mode = 0644, 4284 .mode = 0644,
4283 .proc_handler = proc_dointvec, 4285 .proc_handler = proc_dointvec,
4284 }, 4286 },
4285 { 4287 {
4286 .procname = "accept_ra_defrtr", 4288 .procname = "accept_ra_defrtr",
4287 .data = &ipv6_devconf.accept_ra_defrtr, 4289 .data = &ipv6_devconf.accept_ra_defrtr,
4288 .maxlen = sizeof(int), 4290 .maxlen = sizeof(int),
4289 .mode = 0644, 4291 .mode = 0644,
4290 .proc_handler = proc_dointvec, 4292 .proc_handler = proc_dointvec,
4291 }, 4293 },
4292 { 4294 {
4293 .procname = "accept_ra_pinfo", 4295 .procname = "accept_ra_pinfo",
4294 .data = &ipv6_devconf.accept_ra_pinfo, 4296 .data = &ipv6_devconf.accept_ra_pinfo,
4295 .maxlen = sizeof(int), 4297 .maxlen = sizeof(int),
4296 .mode = 0644, 4298 .mode = 0644,
4297 .proc_handler = proc_dointvec, 4299 .proc_handler = proc_dointvec,
4298 }, 4300 },
4299#ifdef CONFIG_IPV6_ROUTER_PREF 4301#ifdef CONFIG_IPV6_ROUTER_PREF
4300 { 4302 {
4301 .procname = "accept_ra_rtr_pref", 4303 .procname = "accept_ra_rtr_pref",
4302 .data = &ipv6_devconf.accept_ra_rtr_pref, 4304 .data = &ipv6_devconf.accept_ra_rtr_pref,
4303 .maxlen = sizeof(int), 4305 .maxlen = sizeof(int),
4304 .mode = 0644, 4306 .mode = 0644,
4305 .proc_handler = proc_dointvec, 4307 .proc_handler = proc_dointvec,
4306 }, 4308 },
4307 { 4309 {
4308 .procname = "router_probe_interval", 4310 .procname = "router_probe_interval",
4309 .data = &ipv6_devconf.rtr_probe_interval, 4311 .data = &ipv6_devconf.rtr_probe_interval,
4310 .maxlen = sizeof(int), 4312 .maxlen = sizeof(int),
4311 .mode = 0644, 4313 .mode = 0644,
4312 .proc_handler = proc_dointvec_jiffies, 4314 .proc_handler = proc_dointvec_jiffies,
4313 }, 4315 },
4314#ifdef CONFIG_IPV6_ROUTE_INFO 4316#ifdef CONFIG_IPV6_ROUTE_INFO
4315 { 4317 {
4316 .procname = "accept_ra_rt_info_max_plen", 4318 .procname = "accept_ra_rt_info_max_plen",
4317 .data = &ipv6_devconf.accept_ra_rt_info_max_plen, 4319 .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
4318 .maxlen = sizeof(int), 4320 .maxlen = sizeof(int),
4319 .mode = 0644, 4321 .mode = 0644,
4320 .proc_handler = proc_dointvec, 4322 .proc_handler = proc_dointvec,
4321 }, 4323 },
4322#endif 4324#endif
4323#endif 4325#endif
4324 { 4326 {
4325 .procname = "proxy_ndp", 4327 .procname = "proxy_ndp",
4326 .data = &ipv6_devconf.proxy_ndp, 4328 .data = &ipv6_devconf.proxy_ndp,
4327 .maxlen = sizeof(int), 4329 .maxlen = sizeof(int),
4328 .mode = 0644, 4330 .mode = 0644,
4329 .proc_handler = proc_dointvec, 4331 .proc_handler = proc_dointvec,
4330 }, 4332 },
4331 { 4333 {
4332 .procname = "accept_source_route", 4334 .procname = "accept_source_route",
4333 .data = &ipv6_devconf.accept_source_route, 4335 .data = &ipv6_devconf.accept_source_route,
4334 .maxlen = sizeof(int), 4336 .maxlen = sizeof(int),
4335 .mode = 0644, 4337 .mode = 0644,
4336 .proc_handler = proc_dointvec, 4338 .proc_handler = proc_dointvec,
4337 }, 4339 },
4338#ifdef CONFIG_IPV6_OPTIMISTIC_DAD 4340#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
4339 { 4341 {
4340 .procname = "optimistic_dad", 4342 .procname = "optimistic_dad",
4341 .data = &ipv6_devconf.optimistic_dad, 4343 .data = &ipv6_devconf.optimistic_dad,
4342 .maxlen = sizeof(int), 4344 .maxlen = sizeof(int),
4343 .mode = 0644, 4345 .mode = 0644,
4344 .proc_handler = proc_dointvec, 4346 .proc_handler = proc_dointvec,
4345 4347
4346 }, 4348 },
4347#endif 4349#endif
4348#ifdef CONFIG_IPV6_MROUTE 4350#ifdef CONFIG_IPV6_MROUTE
4349 { 4351 {
4350 .procname = "mc_forwarding", 4352 .procname = "mc_forwarding",
4351 .data = &ipv6_devconf.mc_forwarding, 4353 .data = &ipv6_devconf.mc_forwarding,
4352 .maxlen = sizeof(int), 4354 .maxlen = sizeof(int),
4353 .mode = 0444, 4355 .mode = 0444,
4354 .proc_handler = proc_dointvec, 4356 .proc_handler = proc_dointvec,
4355 }, 4357 },
4356#endif 4358#endif
4357 { 4359 {
4358 .procname = "disable_ipv6", 4360 .procname = "disable_ipv6",
4359 .data = &ipv6_devconf.disable_ipv6, 4361 .data = &ipv6_devconf.disable_ipv6,
4360 .maxlen = sizeof(int), 4362 .maxlen = sizeof(int),
4361 .mode = 0644, 4363 .mode = 0644,
4362 .proc_handler = addrconf_sysctl_disable, 4364 .proc_handler = addrconf_sysctl_disable,
4363 }, 4365 },
4364 { 4366 {
4365 .procname = "accept_dad", 4367 .procname = "accept_dad",
4366 .data = &ipv6_devconf.accept_dad, 4368 .data = &ipv6_devconf.accept_dad,
4367 .maxlen = sizeof(int), 4369 .maxlen = sizeof(int),
4368 .mode = 0644, 4370 .mode = 0644,
4369 .proc_handler = proc_dointvec, 4371 .proc_handler = proc_dointvec,
4370 }, 4372 },
4371 { 4373 {
4372 .procname = "force_tllao", 4374 .procname = "force_tllao",
@@ -4402,8 +4404,8 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
4402 if (t == NULL) 4404 if (t == NULL)
4403 goto out; 4405 goto out;
4404 4406
4405 for (i=0; t->addrconf_vars[i].data; i++) { 4407 for (i = 0; t->addrconf_vars[i].data; i++) {
4406 t->addrconf_vars[i].data += (char*)p - (char*)&ipv6_devconf; 4408 t->addrconf_vars[i].data += (char *)p - (char *)&ipv6_devconf;
4407 t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */ 4409 t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */
4408 t->addrconf_vars[i].extra2 = net; 4410 t->addrconf_vars[i].extra2 = net;
4409 } 4411 }
@@ -4540,14 +4542,12 @@ int register_inet6addr_notifier(struct notifier_block *nb)
4540{ 4542{
4541 return atomic_notifier_chain_register(&inet6addr_chain, nb); 4543 return atomic_notifier_chain_register(&inet6addr_chain, nb);
4542} 4544}
4543
4544EXPORT_SYMBOL(register_inet6addr_notifier); 4545EXPORT_SYMBOL(register_inet6addr_notifier);
4545 4546
4546int unregister_inet6addr_notifier(struct notifier_block *nb) 4547int unregister_inet6addr_notifier(struct notifier_block *nb)
4547{ 4548{
4548 return atomic_notifier_chain_unregister(&inet6addr_chain,nb); 4549 return atomic_notifier_chain_unregister(&inet6addr_chain, nb);
4549} 4550}
4550
4551EXPORT_SYMBOL(unregister_inet6addr_notifier); 4551EXPORT_SYMBOL(unregister_inet6addr_notifier);
4552 4552
4553/* 4553/*
@@ -4556,11 +4556,12 @@ EXPORT_SYMBOL(unregister_inet6addr_notifier);
4556 4556
4557int __init addrconf_init(void) 4557int __init addrconf_init(void)
4558{ 4558{
4559 int err; 4559 int i, err;
4560 4560
4561 if ((err = ipv6_addr_label_init()) < 0) { 4561 err = ipv6_addr_label_init();
4562 printk(KERN_CRIT "IPv6 Addrconf: cannot initialize default policy table: %d.\n", 4562 if (err < 0) {
4563 err); 4563 printk(KERN_CRIT "IPv6 Addrconf:"
4564 " cannot initialize default policy table: %d.\n", err);
4564 return err; 4565 return err;
4565 } 4566 }
4566 4567
@@ -4591,6 +4592,9 @@ int __init addrconf_init(void)
4591 if (err) 4592 if (err)
4592 goto errlo; 4593 goto errlo;
4593 4594
4595 for (i = 0; i < IN6_ADDR_HSIZE; i++)
4596 INIT_HLIST_HEAD(&inet6_addr_lst[i]);
4597
4594 register_netdevice_notifier(&ipv6_dev_notf); 4598 register_netdevice_notifier(&ipv6_dev_notf);
4595 4599
4596 addrconf_verify(0); 4600 addrconf_verify(0);
@@ -4619,7 +4623,6 @@ errlo:
4619 4623
4620void addrconf_cleanup(void) 4624void addrconf_cleanup(void)
4621{ 4625{
4622 struct inet6_ifaddr *ifa;
4623 struct net_device *dev; 4626 struct net_device *dev;
4624 int i; 4627 int i;
4625 4628
@@ -4639,20 +4642,10 @@ void addrconf_cleanup(void)
4639 /* 4642 /*
4640 * Check hash table. 4643 * Check hash table.
4641 */ 4644 */
4642 write_lock_bh(&addrconf_hash_lock); 4645 spin_lock_bh(&addrconf_hash_lock);
4643 for (i=0; i < IN6_ADDR_HSIZE; i++) { 4646 for (i = 0; i < IN6_ADDR_HSIZE; i++)
4644 for (ifa=inet6_addr_lst[i]; ifa; ) { 4647 WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
4645 struct inet6_ifaddr *bifa; 4648 spin_unlock_bh(&addrconf_hash_lock);
4646
4647 bifa = ifa;
4648 ifa = ifa->lst_next;
4649 printk(KERN_DEBUG "bug: IPv6 address leakage detected: ifa=%p\n", bifa);
4650 /* Do not free it; something is wrong.
4651 Now we can investigate it with debugger.
4652 */
4653 }
4654 }
4655 write_unlock_bh(&addrconf_hash_lock);
4656 4649
4657 del_timer(&addr_chk_timer); 4650 del_timer(&addr_chk_timer);
4658 rtnl_unlock(); 4651 rtnl_unlock();
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 6ff73c4c126a..ae404c9a746c 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -13,6 +13,7 @@
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/rcupdate.h> 14#include <linux/rcupdate.h>
15#include <linux/in6.h> 15#include <linux/in6.h>
16#include <linux/slab.h>
16#include <net/addrconf.h> 17#include <net/addrconf.h>
17#include <linux/if_addrlabel.h> 18#include <linux/if_addrlabel.h>
18#include <linux/netlink.h> 19#include <linux/netlink.h>
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 37d14e735c27..3192aa02ba5d 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -36,6 +36,7 @@
36#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
37#include <linux/stat.h> 37#include <linux/stat.h>
38#include <linux/init.h> 38#include <linux/init.h>
39#include <linux/slab.h>
39 40
40#include <linux/inet.h> 41#include <linux/inet.h>
41#include <linux/netdevice.h> 42#include <linux/netdevice.h>
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 5ac89025f9de..ee82d4ef26ce 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -26,6 +26,7 @@
26 26
27#include <crypto/hash.h> 27#include <crypto/hash.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/slab.h>
29#include <net/ip.h> 30#include <net/ip.h>
30#include <net/ah.h> 31#include <net/ah.h>
31#include <linux/crypto.h> 32#include <linux/crypto.h>
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index c4f6ca32fa74..b5b07054508a 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -29,6 +29,7 @@
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/proc_fs.h> 30#include <linux/proc_fs.h>
31#include <linux/seq_file.h> 31#include <linux/seq_file.h>
32#include <linux/slab.h>
32 33
33#include <net/net_namespace.h> 34#include <net/net_namespace.h>
34#include <net/sock.h> 35#include <net/sock.h>
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index e6f9cdf780fe..622dc7939a1b 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -21,6 +21,7 @@
21#include <linux/in6.h> 21#include <linux/in6.h>
22#include <linux/ipv6.h> 22#include <linux/ipv6.h>
23#include <linux/route.h> 23#include <linux/route.h>
24#include <linux/slab.h>
24 25
25#include <net/ipv6.h> 26#include <net/ipv6.h>
26#include <net/ndisc.h> 27#include <net/ndisc.h>
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 074f2c084f9f..8a659f92d17a 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -29,6 +29,7 @@
29#include <linux/netdevice.h> 29#include <linux/netdevice.h>
30#include <linux/in6.h> 30#include <linux/in6.h>
31#include <linux/icmpv6.h> 31#include <linux/icmpv6.h>
32#include <linux/slab.h>
32 33
33#include <net/dst.h> 34#include <net/dst.h>
34#include <net/sock.h> 35#include <net/sock.h>
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 5e463c43fcc2..8124f16f2ac2 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -208,7 +208,6 @@ static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
208{ 208{
209 struct fib6_rule *rule6 = (struct fib6_rule *) rule; 209 struct fib6_rule *rule6 = (struct fib6_rule *) rule;
210 210
211 frh->family = AF_INET6;
212 frh->dst_len = rule6->dst.plen; 211 frh->dst_len = rule6->dst.plen;
213 frh->src_len = rule6->src.plen; 212 frh->src_len = rule6->src.plen;
214 frh->tos = rule6->tclass; 213 frh->tos = rule6->tclass;
@@ -239,7 +238,7 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
239} 238}
240 239
241static struct fib_rules_ops fib6_rules_ops_template = { 240static struct fib_rules_ops fib6_rules_ops_template = {
242 .family = AF_INET6, 241 .family = FIB_RULES_IPV6,
243 .rule_size = sizeof(struct fib6_rule), 242 .rule_size = sizeof(struct fib6_rule),
244 .addr_size = sizeof(struct in6_addr), 243 .addr_size = sizeof(struct in6_addr),
245 .action = fib6_rule_action, 244 .action = fib6_rule_action,
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index eb9abe24bdf0..12d2fa42657d 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -40,6 +40,7 @@
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/init.h> 41#include <linux/init.h>
42#include <linux/netfilter.h> 42#include <linux/netfilter.h>
43#include <linux/slab.h>
43 44
44#ifdef CONFIG_SYSCTL 45#ifdef CONFIG_SYSCTL
45#include <linux/sysctl.h> 46#include <linux/sysctl.h>
@@ -482,6 +483,7 @@ route_done:
482 np->tclass, NULL, &fl, (struct rt6_info*)dst, 483 np->tclass, NULL, &fl, (struct rt6_info*)dst,
483 MSG_DONTWAIT); 484 MSG_DONTWAIT);
484 if (err) { 485 if (err) {
486 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
485 ip6_flush_pending_frames(sk); 487 ip6_flush_pending_frames(sk);
486 goto out_put; 488 goto out_put;
487 } 489 }
@@ -562,6 +564,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
562 (struct rt6_info*)dst, MSG_DONTWAIT); 564 (struct rt6_info*)dst, MSG_DONTWAIT);
563 565
564 if (err) { 566 if (err) {
567 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
565 ip6_flush_pending_frames(sk); 568 ip6_flush_pending_frames(sk);
566 goto out_put; 569 goto out_put;
567 } 570 }
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 3516e6fe2e56..0c5e3c3b7fd5 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -17,6 +17,7 @@
17#include <linux/in6.h> 17#include <linux/in6.h>
18#include <linux/ipv6.h> 18#include <linux/ipv6.h>
19#include <linux/jhash.h> 19#include <linux/jhash.h>
20#include <linux/slab.h>
20 21
21#include <net/addrconf.h> 22#include <net/addrconf.h>
22#include <net/inet_connection_sock.h> 23#include <net/inet_connection_sock.h>
@@ -177,7 +178,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
177 return dst; 178 return dst;
178} 179}
179 180
180int inet6_csk_xmit(struct sk_buff *skb, int ipfragok) 181int inet6_csk_xmit(struct sk_buff *skb)
181{ 182{
182 struct sock *sk = skb->sk; 183 struct sock *sk = skb->sk;
183 struct inet_sock *inet = inet_sk(sk); 184 struct inet_sock *inet = inet_sk(sk);
@@ -233,7 +234,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
233 /* Restore final destination back after routing done */ 234 /* Restore final destination back after routing done */
234 ipv6_addr_copy(&fl.fl6_dst, &np->daddr); 235 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
235 236
236 return ip6_xmit(sk, skb, &fl, np->opt, 0); 237 return ip6_xmit(sk, skb, &fl, np->opt);
237} 238}
238 239
239EXPORT_SYMBOL_GPL(inet6_csk_xmit); 240EXPORT_SYMBOL_GPL(inet6_csk_xmit);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 2f9847924fa5..dc6e0b8f260d 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -26,6 +26,7 @@
26#include <linux/in6.h> 26#include <linux/in6.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/list.h> 28#include <linux/list.h>
29#include <linux/slab.h>
29 30
30#ifdef CONFIG_PROC_FS 31#ifdef CONFIG_PROC_FS
31#include <linux/proc_fs.h> 32#include <linux/proc_fs.h>
@@ -127,12 +128,23 @@ static __inline__ u32 fib6_new_sernum(void)
127/* 128/*
128 * test bit 129 * test bit
129 */ 130 */
131#if defined(__LITTLE_ENDIAN)
132# define BITOP_BE32_SWIZZLE (0x1F & ~7)
133#else
134# define BITOP_BE32_SWIZZLE 0
135#endif
130 136
131static __inline__ __be32 addr_bit_set(void *token, int fn_bit) 137static __inline__ __be32 addr_bit_set(void *token, int fn_bit)
132{ 138{
133 __be32 *addr = token; 139 __be32 *addr = token;
134 140 /*
135 return htonl(1 << ((~fn_bit)&0x1F)) & addr[fn_bit>>5]; 141 * Here,
142 * 1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)
143 * is optimized version of
144 * htonl(1 << ((~fn_bit)&0x1F))
145 * See include/asm-generic/bitops/le.h.
146 */
147 return (1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)) & addr[fn_bit >> 5];
136} 148}
137 149
138static __inline__ struct fib6_node * node_alloc(void) 150static __inline__ struct fib6_node * node_alloc(void)
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index e41eba8aacf1..14e23216eb28 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -20,6 +20,7 @@
20#include <linux/route.h> 20#include <linux/route.h>
21#include <linux/proc_fs.h> 21#include <linux/proc_fs.h>
22#include <linux/seq_file.h> 22#include <linux/seq_file.h>
23#include <linux/slab.h>
23 24
24#include <net/net_namespace.h> 25#include <net/net_namespace.h>
25#include <net/sock.h> 26#include <net/sock.h>
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 2c01dc65794d..a83e9209cecc 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -28,6 +28,7 @@
28#include <linux/in6.h> 28#include <linux/in6.h>
29#include <linux/icmpv6.h> 29#include <linux/icmpv6.h>
30#include <linux/mroute6.h> 30#include <linux/mroute6.h>
31#include <linux/slab.h>
31 32
32#include <linux/netfilter.h> 33#include <linux/netfilter.h>
33#include <linux/netfilter_ipv6.h> 34#include <linux/netfilter_ipv6.h>
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index c10a38a71a5e..7f12e30cfa73 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -37,6 +37,7 @@
37#include <linux/tcp.h> 37#include <linux/tcp.h>
38#include <linux/route.h> 38#include <linux/route.h>
39#include <linux/module.h> 39#include <linux/module.h>
40#include <linux/slab.h>
40 41
41#include <linux/netfilter.h> 42#include <linux/netfilter.h>
42#include <linux/netfilter_ipv6.h> 43#include <linux/netfilter_ipv6.h>
@@ -178,11 +179,11 @@ int ip6_output(struct sk_buff *skb)
178} 179}
179 180
180/* 181/*
181 * xmit an sk_buff (used by TCP) 182 * xmit an sk_buff (used by TCP, SCTP and DCCP)
182 */ 183 */
183 184
184int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, 185int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
185 struct ipv6_txoptions *opt, int ipfragok) 186 struct ipv6_txoptions *opt)
186{ 187{
187 struct net *net = sock_net(sk); 188 struct net *net = sock_net(sk);
188 struct ipv6_pinfo *np = inet6_sk(sk); 189 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -228,10 +229,6 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
228 skb_reset_network_header(skb); 229 skb_reset_network_header(skb);
229 hdr = ipv6_hdr(skb); 230 hdr = ipv6_hdr(skb);
230 231
231 /* Allow local fragmentation. */
232 if (ipfragok)
233 skb->local_df = 1;
234
235 /* 232 /*
236 * Fill in the IPv6 header 233 * Fill in the IPv6 header
237 */ 234 */
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 138980eec214..2599870747ec 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -37,6 +37,7 @@
37#include <linux/route.h> 37#include <linux/route.h>
38#include <linux/rtnetlink.h> 38#include <linux/rtnetlink.h>
39#include <linux/netfilter_ipv6.h> 39#include <linux/netfilter_ipv6.h>
40#include <linux/slab.h>
40 41
41#include <asm/uaccess.h> 42#include <asm/uaccess.h>
42#include <asm/atomic.h> 43#include <asm/atomic.h>
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 430372e0bf24..e0b530ca394c 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -33,6 +33,7 @@
33#include <linux/proc_fs.h> 33#include <linux/proc_fs.h>
34#include <linux/seq_file.h> 34#include <linux/seq_file.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/slab.h>
36#include <net/protocol.h> 37#include <net/protocol.h>
37#include <linux/skbuff.h> 38#include <linux/skbuff.h>
38#include <net/sock.h> 39#include <net/sock.h>
@@ -1113,6 +1114,9 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
1113 unsigned char ttls[MAXMIFS]; 1114 unsigned char ttls[MAXMIFS];
1114 int i; 1115 int i;
1115 1116
1117 if (mfc->mf6cc_parent >= MAXMIFS)
1118 return -ENFILE;
1119
1116 memset(ttls, 255, MAXMIFS); 1120 memset(ttls, 255, MAXMIFS);
1117 for (i = 0; i < MAXMIFS; i++) { 1121 for (i = 0; i < MAXMIFS; i++) {
1118 if (IF_ISSET(i, &mfc->mf6cc_ifset)) 1122 if (IF_ISSET(i, &mfc->mf6cc_ifset))
@@ -1692,17 +1696,20 @@ ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
1692 int ct; 1696 int ct;
1693 struct rtnexthop *nhp; 1697 struct rtnexthop *nhp;
1694 struct net *net = mfc6_net(c); 1698 struct net *net = mfc6_net(c);
1695 struct net_device *dev = net->ipv6.vif6_table[c->mf6c_parent].dev;
1696 u8 *b = skb_tail_pointer(skb); 1699 u8 *b = skb_tail_pointer(skb);
1697 struct rtattr *mp_head; 1700 struct rtattr *mp_head;
1698 1701
1699 if (dev) 1702 /* If cache is unresolved, don't try to parse IIF and OIF */
1700 RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex); 1703 if (c->mf6c_parent > MAXMIFS)
1704 return -ENOENT;
1705
1706 if (MIF_EXISTS(net, c->mf6c_parent))
1707 RTA_PUT(skb, RTA_IIF, 4, &net->ipv6.vif6_table[c->mf6c_parent].dev->ifindex);
1701 1708
1702 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); 1709 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
1703 1710
1704 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { 1711 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
1705 if (c->mfc_un.res.ttls[ct] < 255) { 1712 if (MIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
1706 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) 1713 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1707 goto rtattr_failure; 1714 goto rtattr_failure;
1708 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); 1715 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 430454ee5ead..1160400e9dbd 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -36,6 +36,7 @@
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/sysctl.h> 37#include <linux/sysctl.h>
38#include <linux/netfilter.h> 38#include <linux/netfilter.h>
39#include <linux/slab.h>
39 40
40#include <net/sock.h> 41#include <net/sock.h>
41#include <net/snmp.h> 42#include <net/snmp.h>
@@ -113,9 +114,9 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
113 } 114 }
114 opt = xchg(&inet6_sk(sk)->opt, opt); 115 opt = xchg(&inet6_sk(sk)->opt, opt);
115 } else { 116 } else {
116 write_lock(&sk->sk_dst_lock); 117 spin_lock(&sk->sk_dst_lock);
117 opt = xchg(&inet6_sk(sk)->opt, opt); 118 opt = xchg(&inet6_sk(sk)->opt, opt);
118 write_unlock(&sk->sk_dst_lock); 119 spin_unlock(&sk->sk_dst_lock);
119 } 120 }
120 sk_dst_reset(sk); 121 sk_dst_reset(sk);
121 122
@@ -970,14 +971,13 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
970 case IPV6_MTU: 971 case IPV6_MTU:
971 { 972 {
972 struct dst_entry *dst; 973 struct dst_entry *dst;
974
973 val = 0; 975 val = 0;
974 lock_sock(sk); 976 rcu_read_lock();
975 dst = sk_dst_get(sk); 977 dst = __sk_dst_get(sk);
976 if (dst) { 978 if (dst)
977 val = dst_mtu(dst); 979 val = dst_mtu(dst);
978 dst_release(dst); 980 rcu_read_unlock();
979 }
980 release_sock(sk);
981 if (!val) 981 if (!val)
982 return -ENOTCONN; 982 return -ENOTCONN;
983 break; 983 break;
@@ -1065,12 +1065,14 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
1065 else 1065 else
1066 val = np->mcast_hops; 1066 val = np->mcast_hops;
1067 1067
1068 dst = sk_dst_get(sk); 1068 if (val < 0) {
1069 if (dst) { 1069 rcu_read_lock();
1070 if (val < 0) 1070 dst = __sk_dst_get(sk);
1071 if (dst)
1071 val = ip6_dst_hoplimit(dst); 1072 val = ip6_dst_hoplimit(dst);
1072 dst_release(dst); 1073 rcu_read_unlock();
1073 } 1074 }
1075
1074 if (val < 0) 1076 if (val < 0)
1075 val = sock_net(sk)->ipv6.devconf_all->hop_limit; 1077 val = sock_net(sk)->ipv6.devconf_all->hop_limit;
1076 break; 1078 break;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 773b9d18b748..f9d05ce4e03a 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -43,6 +43,7 @@
43#include <linux/init.h> 43#include <linux/init.h>
44#include <linux/proc_fs.h> 44#include <linux/proc_fs.h>
45#include <linux/seq_file.h> 45#include <linux/seq_file.h>
46#include <linux/slab.h>
46 47
47#include <linux/netfilter.h> 48#include <linux/netfilter.h>
48#include <linux/netfilter_ipv6.h> 49#include <linux/netfilter_ipv6.h>
@@ -714,7 +715,7 @@ static void igmp6_group_added(struct ifmcaddr6 *mc)
714 if (!(mc->mca_flags&MAF_LOADED)) { 715 if (!(mc->mca_flags&MAF_LOADED)) {
715 mc->mca_flags |= MAF_LOADED; 716 mc->mca_flags |= MAF_LOADED;
716 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) 717 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
717 dev_mc_add(dev, buf, dev->addr_len, 0); 718 dev_mc_add(dev, buf);
718 } 719 }
719 spin_unlock_bh(&mc->mca_lock); 720 spin_unlock_bh(&mc->mca_lock);
720 721
@@ -740,7 +741,7 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
740 if (mc->mca_flags&MAF_LOADED) { 741 if (mc->mca_flags&MAF_LOADED) {
741 mc->mca_flags &= ~MAF_LOADED; 742 mc->mca_flags &= ~MAF_LOADED;
742 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) 743 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
743 dev_mc_delete(dev, buf, dev->addr_len, 0); 744 dev_mc_del(dev, buf);
744 } 745 }
745 746
746 if (mc->mca_flags & MAF_NOREPORT) 747 if (mc->mca_flags & MAF_NOREPORT)
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 8e96a350f52f..3f7c12b70a26 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -59,6 +59,7 @@
59#include <linux/route.h> 59#include <linux/route.h>
60#include <linux/init.h> 60#include <linux/init.h>
61#include <linux/rcupdate.h> 61#include <linux/rcupdate.h>
62#include <linux/slab.h>
62#ifdef CONFIG_SYSCTL 63#ifdef CONFIG_SYSCTL
63#include <linux/sysctl.h> 64#include <linux/sysctl.h>
64#endif 65#endif
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 39856a25189c..8656eb75520c 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -25,6 +25,7 @@
25#include <linux/proc_fs.h> 25#include <linux/proc_fs.h>
26#include <linux/seq_file.h> 26#include <linux/seq_file.h>
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/slab.h>
28#include <net/net_namespace.h> 29#include <net/net_namespace.h>
29#include <net/sock.h> 30#include <net/sock.h>
30#include <net/ipv6.h> 31#include <net/ipv6.h>
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index dad97622ed72..af1d6494ac39 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -15,6 +15,8 @@
15 * 2 of the License, or (at your option) any later version. 15 * 2 of the License, or (at your option) any later version.
16 */ 16 */
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/gfp.h>
18#include <linux/module.h> 20#include <linux/module.h>
19#include <linux/skbuff.h> 21#include <linux/skbuff.h>
20#include <linux/icmpv6.h> 22#include <linux/icmpv6.h>
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c
index 5e6acdae6d80..e424e7c8f824 100644
--- a/net/ipv6/netfilter/ip6t_hbh.c
+++ b/net/ipv6/netfilter/ip6t_hbh.c
@@ -145,11 +145,11 @@ hbh_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
145 } 145 }
146 146
147 /* Step to the next */ 147 /* Step to the next */
148 pr_debug("len%04X \n", optlen); 148 pr_debug("len%04X\n", optlen);
149 149
150 if ((ptr > skb->len - optlen || hdrlen < optlen) && 150 if ((ptr > skb->len - optlen || hdrlen < optlen) &&
151 temp < optinfo->optsnr - 1) { 151 temp < optinfo->optsnr - 1) {
152 pr_debug("new pointer is too large! \n"); 152 pr_debug("new pointer is too large!\n");
153 break; 153 break;
154 } 154 }
155 ptr += optlen; 155 ptr += optlen;
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index 36b72cafc227..d6fc9aff3163 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/moduleparam.h> 13#include <linux/moduleparam.h>
14#include <linux/netfilter_ipv6/ip6_tables.h> 14#include <linux/netfilter_ipv6/ip6_tables.h>
15#include <linux/slab.h>
15 16
16MODULE_LICENSE("GPL"); 17MODULE_LICENSE("GPL");
17MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 18MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 7844e557c0ec..6a102b57f356 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -10,6 +10,7 @@
10 */ 10 */
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/netfilter_ipv6/ip6_tables.h> 12#include <linux/netfilter_ipv6/ip6_tables.h>
13#include <linux/slab.h>
13 14
14MODULE_LICENSE("GPL"); 15MODULE_LICENSE("GPL");
15MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 16MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index aef31a29de9e..5b9926a011bd 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -5,6 +5,7 @@
5 */ 5 */
6#include <linux/module.h> 6#include <linux/module.h>
7#include <linux/netfilter_ipv6/ip6_tables.h> 7#include <linux/netfilter_ipv6/ip6_tables.h>
8#include <linux/slab.h>
8 9
9#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) 10#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))
10 11
@@ -13,7 +14,7 @@ static const struct xt_table packet_raw = {
13 .valid_hooks = RAW_VALID_HOOKS, 14 .valid_hooks = RAW_VALID_HOOKS,
14 .me = THIS_MODULE, 15 .me = THIS_MODULE,
15 .af = NFPROTO_IPV6, 16 .af = NFPROTO_IPV6,
16 .priority = NF_IP6_PRI_FIRST, 17 .priority = NF_IP6_PRI_RAW,
17}; 18};
18 19
19/* The work comes in here from netfilter.c. */ 20/* The work comes in here from netfilter.c. */
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
index 0824d865aa9b..91aa2b4d83c9 100644
--- a/net/ipv6/netfilter/ip6table_security.c
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -17,6 +17,7 @@
17 */ 17 */
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/netfilter_ipv6/ip6_tables.h> 19#include <linux/netfilter_ipv6/ip6_tables.h>
20#include <linux/slab.h>
20 21
21MODULE_LICENSE("GPL"); 22MODULE_LICENSE("GPL");
22MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>"); 23MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>");
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 8f80e245f370..6fb890187de0 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -27,6 +27,7 @@
27#include <linux/ipv6.h> 27#include <linux/ipv6.h>
28#include <linux/icmpv6.h> 28#include <linux/icmpv6.h>
29#include <linux/random.h> 29#include <linux/random.h>
30#include <linux/slab.h>
30 31
31#include <net/sock.h> 32#include <net/sock.h>
32#include <net/snmp.h> 33#include <net/snmp.h>
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 58344c0fbd13..458eabfbe130 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -97,6 +97,7 @@ static const struct snmp_mib snmp6_icmp6_list[] = {
97 SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS), 97 SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS),
98 SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS), 98 SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS),
99 SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS), 99 SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS),
100 SNMP_MIB_ITEM("Icmp6OutErrors", ICMP6_MIB_OUTERRORS),
100 SNMP_MIB_SENTINEL 101 SNMP_MIB_SENTINEL
101}; 102};
102 103
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index e9e1f774b0b7..554b48b6e993 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -21,6 +21,7 @@
21#include <linux/errno.h> 21#include <linux/errno.h>
22#include <linux/types.h> 22#include <linux/types.h>
23#include <linux/socket.h> 23#include <linux/socket.h>
24#include <linux/slab.h>
24#include <linux/sockios.h> 25#include <linux/sockios.h>
25#include <linux/net.h> 26#include <linux/net.h>
26#include <linux/in6.h> 27#include <linux/in6.h>
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index a555156e9779..6d4292ff5854 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -41,6 +41,7 @@
41#include <linux/random.h> 41#include <linux/random.h>
42#include <linux/jhash.h> 42#include <linux/jhash.h>
43#include <linux/skbuff.h> 43#include <linux/skbuff.h>
44#include <linux/slab.h>
44 45
45#include <net/sock.h> 46#include <net/sock.h>
46#include <net/snmp.h> 47#include <net/snmp.h>
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 52cd3eff31dc..c2438e8cb9d0 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -40,6 +40,7 @@
40#include <linux/proc_fs.h> 40#include <linux/proc_fs.h>
41#include <linux/seq_file.h> 41#include <linux/seq_file.h>
42#include <linux/nsproxy.h> 42#include <linux/nsproxy.h>
43#include <linux/slab.h>
43#include <net/net_namespace.h> 44#include <net/net_namespace.h>
44#include <net/snmp.h> 45#include <net/snmp.h>
45#include <net/ipv6.h> 46#include <net/ipv6.h>
@@ -879,7 +880,7 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
879 880
880 rt = (struct rt6_info *) dst; 881 rt = (struct rt6_info *) dst;
881 882
882 if (rt && rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) 883 if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
883 return dst; 884 return dst;
884 885
885 return NULL; 886 return NULL;
@@ -890,12 +891,17 @@ static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
890 struct rt6_info *rt = (struct rt6_info *) dst; 891 struct rt6_info *rt = (struct rt6_info *) dst;
891 892
892 if (rt) { 893 if (rt) {
893 if (rt->rt6i_flags & RTF_CACHE) 894 if (rt->rt6i_flags & RTF_CACHE) {
894 ip6_del_rt(rt); 895 if (rt6_check_expired(rt)) {
895 else 896 ip6_del_rt(rt);
897 dst = NULL;
898 }
899 } else {
896 dst_release(dst); 900 dst_release(dst);
901 dst = NULL;
902 }
897 } 903 }
898 return NULL; 904 return dst;
899} 905}
900 906
901static void ip6_link_failure(struct sk_buff *skb) 907static void ip6_link_failure(struct sk_buff *skb)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index b1eea811be48..5abae10cd884 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -28,6 +28,7 @@
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/if_arp.h> 29#include <linux/if_arp.h>
30#include <linux/icmp.h> 30#include <linux/icmp.h>
31#include <linux/slab.h>
31#include <asm/uaccess.h> 32#include <asm/uaccess.h>
32#include <linux/init.h> 33#include <linux/init.h>
33#include <linux/netfilter_ipv4.h> 34#include <linux/netfilter_ipv4.h>
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index f841d93bf987..fa1d8f4e0051 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -9,6 +9,7 @@
9#include <linux/sysctl.h> 9#include <linux/sysctl.h>
10#include <linux/in6.h> 10#include <linux/in6.h>
11#include <linux/ipv6.h> 11#include <linux/ipv6.h>
12#include <linux/slab.h>
12#include <net/ndisc.h> 13#include <net/ndisc.h>
13#include <net/ipv6.h> 14#include <net/ipv6.h>
14#include <net/addrconf.h> 15#include <net/addrconf.h>
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 9b6dbba80d31..bd5ef7b6e48e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -38,6 +38,7 @@
38#include <linux/jhash.h> 38#include <linux/jhash.h>
39#include <linux/ipsec.h> 39#include <linux/ipsec.h>
40#include <linux/times.h> 40#include <linux/times.h>
41#include <linux/slab.h>
41 42
42#include <linux/ipv6.h> 43#include <linux/ipv6.h>
43#include <linux/icmpv6.h> 44#include <linux/icmpv6.h>
@@ -74,6 +75,9 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
74 struct request_sock *req); 75 struct request_sock *req);
75 76
76static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); 77static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78static void __tcp_v6_send_check(struct sk_buff *skb,
79 struct in6_addr *saddr,
80 struct in6_addr *daddr);
77 81
78static const struct inet_connection_sock_af_ops ipv6_mapped; 82static const struct inet_connection_sock_af_ops ipv6_mapped;
79static const struct inet_connection_sock_af_ops ipv6_specific; 83static const struct inet_connection_sock_af_ops ipv6_specific;
@@ -502,14 +506,10 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
502 506
503 skb = tcp_make_synack(sk, dst, req, rvp); 507 skb = tcp_make_synack(sk, dst, req, rvp);
504 if (skb) { 508 if (skb) {
505 struct tcphdr *th = tcp_hdr(skb); 509 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
506
507 th->check = tcp_v6_check(skb->len,
508 &treq->loc_addr, &treq->rmt_addr,
509 csum_partial(th, skb->len, skb->csum));
510 510
511 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); 511 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
512 err = ip6_xmit(sk, skb, &fl, opt, 0); 512 err = ip6_xmit(sk, skb, &fl, opt);
513 err = net_xmit_eval(err); 513 err = net_xmit_eval(err);
514 } 514 }
515 515
@@ -917,22 +917,29 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
917 .twsk_destructor= tcp_twsk_destructor, 917 .twsk_destructor= tcp_twsk_destructor,
918}; 918};
919 919
920static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb) 920static void __tcp_v6_send_check(struct sk_buff *skb,
921 struct in6_addr *saddr, struct in6_addr *daddr)
921{ 922{
922 struct ipv6_pinfo *np = inet6_sk(sk);
923 struct tcphdr *th = tcp_hdr(skb); 923 struct tcphdr *th = tcp_hdr(skb);
924 924
925 if (skb->ip_summed == CHECKSUM_PARTIAL) { 925 if (skb->ip_summed == CHECKSUM_PARTIAL) {
926 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0); 926 th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
927 skb->csum_start = skb_transport_header(skb) - skb->head; 927 skb->csum_start = skb_transport_header(skb) - skb->head;
928 skb->csum_offset = offsetof(struct tcphdr, check); 928 skb->csum_offset = offsetof(struct tcphdr, check);
929 } else { 929 } else {
930 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 930 th->check = tcp_v6_check(skb->len, saddr, daddr,
931 csum_partial(th, th->doff<<2, 931 csum_partial(th, th->doff << 2,
932 skb->csum)); 932 skb->csum));
933 } 933 }
934} 934}
935 935
936static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
937{
938 struct ipv6_pinfo *np = inet6_sk(sk);
939
940 __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
941}
942
936static int tcp_v6_gso_send_check(struct sk_buff *skb) 943static int tcp_v6_gso_send_check(struct sk_buff *skb)
937{ 944{
938 struct ipv6hdr *ipv6h; 945 struct ipv6hdr *ipv6h;
@@ -945,11 +952,8 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
945 th = tcp_hdr(skb); 952 th = tcp_hdr(skb);
946 953
947 th->check = 0; 954 th->check = 0;
948 th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
949 IPPROTO_TCP, 0);
950 skb->csum_start = skb_transport_header(skb) - skb->head;
951 skb->csum_offset = offsetof(struct tcphdr, check);
952 skb->ip_summed = CHECKSUM_PARTIAL; 955 skb->ip_summed = CHECKSUM_PARTIAL;
956 __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
953 return 0; 957 return 0;
954} 958}
955 959
@@ -1052,9 +1056,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
1052 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr); 1056 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1053 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr); 1057 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1054 1058
1055 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst, 1059 __tcp_v6_send_check(buff, &fl.fl6_src, &fl.fl6_dst);
1056 tot_len, IPPROTO_TCP,
1057 buff->csum);
1058 1060
1059 fl.proto = IPPROTO_TCP; 1061 fl.proto = IPPROTO_TCP;
1060 fl.oif = inet6_iif(skb); 1062 fl.oif = inet6_iif(skb);
@@ -1069,7 +1071,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
1069 if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) { 1071 if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
1070 if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) { 1072 if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
1071 skb_dst_set(buff, dst); 1073 skb_dst_set(buff, dst);
1072 ip6_xmit(ctl_sk, buff, &fl, NULL, 0); 1074 ip6_xmit(ctl_sk, buff, &fl, NULL);
1073 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); 1075 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
1074 if (rst) 1076 if (rst)
1075 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); 1077 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index e17bc1dfc1a4..fc3c86a47452 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -25,6 +25,7 @@
25#include <linux/mutex.h> 25#include <linux/mutex.h>
26#include <linux/netdevice.h> 26#include <linux/netdevice.h>
27#include <linux/skbuff.h> 27#include <linux/skbuff.h>
28#include <linux/slab.h>
28#include <net/ipv6.h> 29#include <net/ipv6.h>
29#include <net/protocol.h> 30#include <net/protocol.h>
30#include <net/xfrm.h> 31#include <net/xfrm.h>
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3c0c9c755c92..90824852f598 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -34,6 +34,7 @@
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/skbuff.h> 36#include <linux/skbuff.h>
37#include <linux/slab.h>
37#include <asm/uaccess.h> 38#include <asm/uaccess.h>
38 39
39#include <net/ndisc.h> 40#include <net/ndisc.h>
@@ -258,8 +259,8 @@ static struct sock *__udp6_lib_lookup(struct net *net,
258 if (hslot->count < hslot2->count) 259 if (hslot->count < hslot2->count)
259 goto begin; 260 goto begin;
260 261
261 result = udp6_lib_lookup2(net, &in6addr_any, sport, 262 result = udp6_lib_lookup2(net, saddr, sport,
262 daddr, hnum, dif, 263 &in6addr_any, hnum, dif,
263 hslot2, slot2); 264 hslot2, slot2);
264 } 265 }
265 rcu_read_unlock(); 266 rcu_read_unlock();
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 3927832227b9..b809812c8d30 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -5,6 +5,7 @@
5 * Copyright (c) 2004-2006 Herbert Xu <herbert@gondor.apana.org.au> 5 * Copyright (c) 2004-2006 Herbert Xu <herbert@gondor.apana.org.au>
6 */ 6 */
7 7
8#include <linux/gfp.h>
8#include <linux/init.h> 9#include <linux/init.h>
9#include <linux/kernel.h> 10#include <linux/kernel.h>
10#include <linux/module.h> 11#include <linux/module.h>
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index ae181651c75a..8c452fd5ceae 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -67,36 +67,6 @@ static int xfrm6_get_saddr(struct net *net,
67 return 0; 67 return 0;
68} 68}
69 69
70static struct dst_entry *
71__xfrm6_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
72{
73 struct dst_entry *dst;
74
75 /* Still not clear if we should set fl->fl6_{src,dst}... */
76 read_lock_bh(&policy->lock);
77 for (dst = policy->bundles; dst; dst = dst->next) {
78 struct xfrm_dst *xdst = (struct xfrm_dst*)dst;
79 struct in6_addr fl_dst_prefix, fl_src_prefix;
80
81 ipv6_addr_prefix(&fl_dst_prefix,
82 &fl->fl6_dst,
83 xdst->u.rt6.rt6i_dst.plen);
84 ipv6_addr_prefix(&fl_src_prefix,
85 &fl->fl6_src,
86 xdst->u.rt6.rt6i_src.plen);
87 if (ipv6_addr_equal(&xdst->u.rt6.rt6i_dst.addr, &fl_dst_prefix) &&
88 ipv6_addr_equal(&xdst->u.rt6.rt6i_src.addr, &fl_src_prefix) &&
89 xfrm_bundle_ok(policy, xdst, fl, AF_INET6,
90 (xdst->u.rt6.rt6i_dst.plen != 128 ||
91 xdst->u.rt6.rt6i_src.plen != 128))) {
92 dst_clone(dst);
93 break;
94 }
95 }
96 read_unlock_bh(&policy->lock);
97 return dst;
98}
99
100static int xfrm6_get_tos(struct flowi *fl) 70static int xfrm6_get_tos(struct flowi *fl)
101{ 71{
102 return 0; 72 return 0;
@@ -291,7 +261,6 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
291 .dst_ops = &xfrm6_dst_ops, 261 .dst_ops = &xfrm6_dst_ops,
292 .dst_lookup = xfrm6_dst_lookup, 262 .dst_lookup = xfrm6_dst_lookup,
293 .get_saddr = xfrm6_get_saddr, 263 .get_saddr = xfrm6_get_saddr,
294 .find_bundle = __xfrm6_find_bundle,
295 .decode_session = _decode_session6, 264 .decode_session = _decode_session6,
296 .get_tos = xfrm6_get_tos, 265 .get_tos = xfrm6_get_tos,
297 .init_path = xfrm6_init_path, 266 .init_path = xfrm6_init_path,
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index fa85a7d22dc4..2ce3a8278f26 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -23,6 +23,7 @@
23 */ 23 */
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/xfrm.h> 25#include <linux/xfrm.h>
26#include <linux/slab.h>
26#include <linux/rculist.h> 27#include <linux/rculist.h>
27#include <net/ip.h> 28#include <net/ip.h>
28#include <net/xfrm.h> 29#include <net/xfrm.h>
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index f9759b54a6de..da3d21c41d90 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -40,6 +40,7 @@
40#include <linux/net.h> 40#include <linux/net.h>
41#include <linux/netdevice.h> 41#include <linux/netdevice.h>
42#include <linux/uio.h> 42#include <linux/uio.h>
43#include <linux/slab.h>
43#include <linux/skbuff.h> 44#include <linux/skbuff.h>
44#include <linux/smp_lock.h> 45#include <linux/smp_lock.h>
45#include <linux/socket.h> 46#include <linux/socket.h>
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c
index e16c11423527..30f4519b092f 100644
--- a/net/ipx/ipx_route.c
+++ b/net/ipx/ipx_route.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/list.h> 10#include <linux/list.h>
11#include <linux/route.h> 11#include <linux/route.h>
12#include <linux/slab.h>
12#include <linux/spinlock.h> 13#include <linux/spinlock.h>
13 14
14#include <net/ipx.h> 15#include <net/ipx.h>
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 10093aab6173..2a4efcea3423 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -48,6 +48,7 @@
48#include <linux/smp_lock.h> 48#include <linux/smp_lock.h>
49#include <linux/socket.h> 49#include <linux/socket.h>
50#include <linux/sockios.h> 50#include <linux/sockios.h>
51#include <linux/slab.h>
51#include <linux/init.h> 52#include <linux/init.h>
52#include <linux/net.h> 53#include <linux/net.h>
53#include <linux/irda.h> 54#include <linux/irda.h>
diff --git a/net/irda/discovery.c b/net/irda/discovery.c
index a6f99b5a1499..c1c8ae939126 100644
--- a/net/irda/discovery.c
+++ b/net/irda/discovery.c
@@ -34,6 +34,7 @@
34#include <linux/socket.h> 34#include <linux/socket.h>
35#include <linux/fs.h> 35#include <linux/fs.h>
36#include <linux/seq_file.h> 36#include <linux/seq_file.h>
37#include <linux/slab.h>
37 38
38#include <net/irda/irda.h> 39#include <net/irda/irda.h>
39#include <net/irda/irlmp.h> 40#include <net/irda/irlmp.h>
diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c
index 018c92941aba..e97082017f4f 100644
--- a/net/irda/ircomm/ircomm_core.c
+++ b/net/irda/ircomm/ircomm_core.c
@@ -33,6 +33,7 @@
33#include <linux/proc_fs.h> 33#include <linux/proc_fs.h>
34#include <linux/seq_file.h> 34#include <linux/seq_file.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/slab.h>
36 37
37#include <net/irda/irda.h> 38#include <net/irda/irda.h>
38#include <net/irda/irmod.h> 39#include <net/irda/irmod.h>
diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c
index 7ba96618660e..08fb54dc8c41 100644
--- a/net/irda/ircomm/ircomm_lmp.c
+++ b/net/irda/ircomm/ircomm_lmp.c
@@ -31,6 +31,7 @@
31 ********************************************************************/ 31 ********************************************************************/
32 32
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/gfp.h>
34 35
35#include <net/irda/irda.h> 36#include <net/irda/irda.h>
36#include <net/irda/irlmp.h> 37#include <net/irda/irlmp.h>
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c
index d57aefd9fe77..8b915f3ac3b9 100644
--- a/net/irda/ircomm/ircomm_param.c
+++ b/net/irda/ircomm/ircomm_param.c
@@ -28,6 +28,7 @@
28 * 28 *
29 ********************************************************************/ 29 ********************************************************************/
30 30
31#include <linux/gfp.h>
31#include <linux/workqueue.h> 32#include <linux/workqueue.h>
32#include <linux/interrupt.h> 33#include <linux/interrupt.h>
33 34
@@ -474,7 +475,7 @@ static int ircomm_param_dce(void *instance, irda_param_t *param, int get)
474 /* Check if any of the settings have changed */ 475 /* Check if any of the settings have changed */
475 if (dce & 0x0f) { 476 if (dce & 0x0f) {
476 if (dce & IRCOMM_DELTA_CTS) { 477 if (dce & IRCOMM_DELTA_CTS) {
477 IRDA_DEBUG(2, "%s(), CTS \n", __func__ ); 478 IRDA_DEBUG(2, "%s(), CTS\n", __func__ );
478 } 479 }
479 } 480 }
480 481
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 8b85d774e47f..faa82ca2dfdc 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -33,6 +33,7 @@
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/fs.h> 35#include <linux/fs.h>
36#include <linux/slab.h>
36#include <linux/sched.h> 37#include <linux/sched.h>
37#include <linux/seq_file.h> 38#include <linux/seq_file.h>
38#include <linux/termios.h> 39#include <linux/termios.h>
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index bf92e1473447..25cc2e695158 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -41,6 +41,7 @@
41#include <linux/tty.h> 41#include <linux/tty.h>
42#include <linux/kmod.h> 42#include <linux/kmod.h>
43#include <linux/spinlock.h> 43#include <linux/spinlock.h>
44#include <linux/slab.h>
44 45
45#include <asm/ioctls.h> 46#include <asm/ioctls.h>
46#include <asm/uaccess.h> 47#include <asm/uaccess.h>
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 294e34d3517c..79a1e5a23e10 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -31,6 +31,7 @@
31#include <linux/string.h> 31#include <linux/string.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/seq_file.h> 33#include <linux/seq_file.h>
34#include <linux/slab.h>
34 35
35#include <asm/byteorder.h> 36#include <asm/byteorder.h>
36#include <asm/unaligned.h> 37#include <asm/unaligned.h>
diff --git a/net/irda/iriap_event.c b/net/irda/iriap_event.c
index a301cbd93785..703774e29e32 100644
--- a/net/irda/iriap_event.c
+++ b/net/irda/iriap_event.c
@@ -24,6 +24,8 @@
24 * 24 *
25 ********************************************************************/ 25 ********************************************************************/
26 26
27#include <linux/slab.h>
28
27#include <net/irda/irda.h> 29#include <net/irda/irda.h>
28#include <net/irda/irlmp.h> 30#include <net/irda/irlmp.h>
29#include <net/irda/iriap.h> 31#include <net/irda/iriap.h>
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index 99ebb96f1386..f07ed9fd5792 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -22,6 +22,7 @@
22 * 22 *
23 ********************************************************************/ 23 ********************************************************************/
24 24
25#include <linux/slab.h>
25#include <linux/string.h> 26#include <linux/string.h>
26#include <linux/socket.h> 27#include <linux/socket.h>
27#include <linux/module.h> 28#include <linux/module.h>
diff --git a/net/irda/irlan/irlan_client.c b/net/irda/irlan/irlan_client.c
index 42f7d960d055..7ed3af957935 100644
--- a/net/irda/irlan/irlan_client.c
+++ b/net/irda/irlan/irlan_client.c
@@ -28,6 +28,7 @@
28 28
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/string.h> 30#include <linux/string.h>
31#include <linux/slab.h>
31#include <linux/errno.h> 32#include <linux/errno.h>
32#include <linux/init.h> 33#include <linux/init.h>
33#include <linux/netdevice.h> 34#include <linux/netdevice.h>
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index e486dc89ea59..a788f9e9427d 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -27,6 +27,7 @@
27 27
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/string.h> 29#include <linux/string.h>
30#include <linux/gfp.h>
30#include <linux/init.h> 31#include <linux/init.h>
31#include <linux/errno.h> 32#include <linux/errno.h>
32#include <linux/proc_fs.h> 33#include <linux/proc_fs.h>
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
index 3f81f81b2dfa..5cf5e6c872bb 100644
--- a/net/irda/irlan/irlan_provider.c
+++ b/net/irda/irlan/irlan_provider.c
@@ -34,6 +34,7 @@
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/random.h> 35#include <linux/random.h>
36#include <linux/bitops.h> 36#include <linux/bitops.h>
37#include <linux/slab.h>
37 38
38#include <asm/system.h> 39#include <asm/system.h>
39#include <asm/byteorder.h> 40#include <asm/byteorder.h>
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index 94a9884d7146..d434c8880745 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -29,6 +29,7 @@
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/skbuff.h> 31#include <linux/skbuff.h>
32#include <linux/slab.h>
32 33
33#include <net/irda/irda.h> 34#include <net/irda/irda.h>
34#include <net/irda/irlap_event.h> 35#include <net/irda/irlap_event.h>
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 7af2e74deda8..688222cbf55b 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -29,6 +29,7 @@
29#include <linux/if_ether.h> 29#include <linux/if_ether.h>
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/irda.h> 31#include <linux/irda.h>
32#include <linux/slab.h>
32 33
33#include <net/pkt_sched.h> 34#include <net/pkt_sched.h>
34#include <net/sock.h> 35#include <net/sock.h>
diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c
index b26dee784aba..df18ab4b6c5e 100644
--- a/net/irda/irnet/irnet_irda.c
+++ b/net/irda/irnet/irnet_irda.c
@@ -11,6 +11,7 @@
11#include "irnet_irda.h" /* Private header */ 11#include "irnet_irda.h" /* Private header */
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/seq_file.h> 13#include <linux/seq_file.h>
14#include <linux/slab.h>
14#include <asm/unaligned.h> 15#include <asm/unaligned.h>
15 16
16/* 17/*
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 6b3602de359a..6a1a202710c5 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <linux/slab.h>
17#include <linux/smp_lock.h> 18#include <linux/smp_lock.h>
18#include "irnet_ppp.h" /* Private header */ 19#include "irnet_ppp.h" /* Private header */
19/* Please put other headers in irnet.h - Thanks */ 20/* Please put other headers in irnet.h - Thanks */
diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c
index 69b5b75f5431..6c7c4b92e4f8 100644
--- a/net/irda/irnetlink.c
+++ b/net/irda/irnetlink.c
@@ -15,6 +15,7 @@
15 15
16#include <linux/socket.h> 16#include <linux/socket.h>
17#include <linux/irda.h> 17#include <linux/irda.h>
18#include <linux/gfp.h>
18#include <net/net_namespace.h> 19#include <net/net_namespace.h>
19#include <net/sock.h> 20#include <net/sock.h>
20#include <net/irda/irda.h> 21#include <net/irda/irda.h>
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index ba01938becb5..849aaf0dabb5 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -192,6 +192,7 @@
192 * Jean II 192 * Jean II
193 */ 193 */
194#include <linux/module.h> 194#include <linux/module.h>
195#include <linux/slab.h>
195 196
196#include <net/irda/irda.h> 197#include <net/irda/irda.h>
197#include <net/irda/irqueue.h> 198#include <net/irda/irqueue.h>
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 9cb79f95bf63..47db1d8a0d92 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -28,6 +28,7 @@
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/fs.h> 29#include <linux/fs.h>
30#include <linux/seq_file.h> 30#include <linux/seq_file.h>
31#include <linux/slab.h>
31 32
32#include <asm/byteorder.h> 33#include <asm/byteorder.h>
33#include <asm/unaligned.h> 34#include <asm/unaligned.h>
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 368707882647..ba9a3fcc2fed 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -26,6 +26,7 @@
26#include <linux/in6.h> 26#include <linux/in6.h>
27#include <linux/proc_fs.h> 27#include <linux/proc_fs.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/slab.h>
29#include <net/net_namespace.h> 30#include <net/net_namespace.h>
30#include <net/netns/generic.h> 31#include <net/netns/generic.h>
31#include <net/xfrm.h> 32#include <net/xfrm.h>
@@ -2129,10 +2130,9 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c
2129 int err; 2130 int err;
2130 2131
2131 out_skb = pfkey_xfrm_policy2msg_prep(xp); 2132 out_skb = pfkey_xfrm_policy2msg_prep(xp);
2132 if (IS_ERR(out_skb)) { 2133 if (IS_ERR(out_skb))
2133 err = PTR_ERR(out_skb); 2134 return PTR_ERR(out_skb);
2134 goto out; 2135
2135 }
2136 err = pfkey_xfrm_policy2msg(out_skb, xp, dir); 2136 err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
2137 if (err < 0) 2137 if (err < 0)
2138 return err; 2138 return err;
@@ -2148,7 +2148,6 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c
2148 out_hdr->sadb_msg_seq = c->seq; 2148 out_hdr->sadb_msg_seq = c->seq;
2149 out_hdr->sadb_msg_pid = c->pid; 2149 out_hdr->sadb_msg_pid = c->pid;
2150 pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp)); 2150 pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
2151out:
2152 return 0; 2151 return 0;
2153 2152
2154} 2153}
diff --git a/net/l2tp/Kconfig b/net/l2tp/Kconfig
new file mode 100644
index 000000000000..4b1e71751e10
--- /dev/null
+++ b/net/l2tp/Kconfig
@@ -0,0 +1,107 @@
1#
2# Layer Two Tunneling Protocol (L2TP)
3#
4
5menuconfig L2TP
6 tristate "Layer Two Tunneling Protocol (L2TP)"
7 depends on INET
8 ---help---
9 Layer Two Tunneling Protocol
10
11 From RFC 2661 <http://www.ietf.org/rfc/rfc2661.txt>.
12
13 L2TP facilitates the tunneling of packets across an
14 intervening network in a way that is as transparent as
15 possible to both end-users and applications.
16
17 L2TP is often used to tunnel PPP traffic over IP
18 tunnels. One IP tunnel may carry thousands of individual PPP
19 connections. L2TP is also used as a VPN protocol, popular
20 with home workers to connect to their offices.
21
22 L2TPv3 allows other protocols as well as PPP to be carried
23 over L2TP tunnels. L2TPv3 is defined in RFC 3931
24 <http://www.ietf.org/rfc/rfc3931.txt>.
25
26 The kernel component handles only L2TP data packets: a
27 userland daemon handles L2TP the control protocol (tunnel
28 and session setup). One such daemon is OpenL2TP
29 (http://openl2tp.org/).
30
31 If you don't need L2TP, say N. To compile all L2TP code as
32 modules, choose M here.
33
34config L2TP_DEBUGFS
35 tristate "L2TP debugfs support"
36 depends on L2TP && DEBUG_FS
37 help
38 Support for l2tp directory in debugfs filesystem. This may be
39 used to dump internal state of the l2tp drivers for problem
40 analysis.
41
42 If unsure, say 'Y'.
43
44 To compile this driver as a module, choose M here. The module
45 will be called l2tp_debugfs.
46
47config L2TP_V3
48 bool "L2TPv3 support (EXPERIMENTAL)"
49 depends on EXPERIMENTAL && L2TP
50 help
51 Layer Two Tunneling Protocol Version 3
52
53 From RFC 3931 <http://www.ietf.org/rfc/rfc3931.txt>.
54
55 The Layer Two Tunneling Protocol (L2TP) provides a dynamic
56 mechanism for tunneling Layer 2 (L2) "circuits" across a
57 packet-oriented data network (e.g., over IP). L2TP, as
58 originally defined in RFC 2661, is a standard method for
59 tunneling Point-to-Point Protocol (PPP) [RFC1661] sessions.
60 L2TP has since been adopted for tunneling a number of other
61 L2 protocols, including ATM, Frame Relay, HDLC and even raw
62 ethernet frames.
63
64 If you are connecting to L2TPv3 equipment, or you want to
65 tunnel raw ethernet frames using L2TP, say Y here. If
66 unsure, say N.
67
68config L2TP_IP
69 tristate "L2TP IP encapsulation for L2TPv3"
70 depends on L2TP_V3
71 help
72 Support for L2TP-over-IP socket family.
73
74 The L2TPv3 protocol defines two possible encapsulations for
75 L2TP frames, namely UDP and plain IP (without UDP). This
76 driver provides a new L2TPIP socket family with which
77 userspace L2TPv3 daemons may create L2TP/IP tunnel sockets
78 when UDP encapsulation is not required. When L2TP is carried
79 in IP packets, it used IP protocol number 115, so this port
80 must be enabled in firewalls.
81
82 To compile this driver as a module, choose M here. The module
83 will be called l2tp_ip.
84
85config L2TP_ETH
86 tristate "L2TP ethernet pseudowire support for L2TPv3"
87 depends on L2TP_V3
88 help
89 Support for carrying raw ethernet frames over L2TPv3.
90
91 From RFC 4719 <http://www.ietf.org/rfc/rfc4719.txt>.
92
93 The Layer 2 Tunneling Protocol, Version 3 (L2TPv3) can be
94 used as a control protocol and for data encapsulation to set
95 up Pseudowires for transporting layer 2 Packet Data Units
96 across an IP network [RFC3931].
97
98 This driver provides an ethernet virtual interface for each
99 L2TP ethernet pseudowire instance. Standard Linux tools may
100 be used to assign an IP address to the local virtual
101 interface, or add the interface to a bridge.
102
103 If you are using L2TPv3, you will almost certainly want to
104 enable this option.
105
106 To compile this driver as a module, choose M here. The module
107 will be called l2tp_eth.
diff --git a/net/l2tp/Makefile b/net/l2tp/Makefile
new file mode 100644
index 000000000000..110e7bc2de5e
--- /dev/null
+++ b/net/l2tp/Makefile
@@ -0,0 +1,12 @@
1#
2# Makefile for the L2TP.
3#
4
5obj-$(CONFIG_L2TP) += l2tp_core.o
6
7# Build l2tp as modules if L2TP is M
8obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_PPPOL2TP)) += l2tp_ppp.o
9obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip.o
10obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_V3)) += l2tp_netlink.o
11obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_ETH)) += l2tp_eth.o
12obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_DEBUGFS)) += l2tp_debugfs.o
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
new file mode 100644
index 000000000000..ecc7aea9efe4
--- /dev/null
+++ b/net/l2tp/l2tp_core.c
@@ -0,0 +1,1693 @@
1/*
2 * L2TP core.
3 *
4 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5 *
6 * This file contains some code of the original L2TPv2 pppol2tp
7 * driver, which has the following copyright:
8 *
9 * Authors: Martijn van Oosterhout <kleptog@svana.org>
10 * James Chapman (jchapman@katalix.com)
11 * Contributors:
12 * Michal Ostrowski <mostrows@speakeasy.net>
13 * Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
14 * David S. Miller (davem@redhat.com)
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License version 2 as
18 * published by the Free Software Foundation.
19 */
20
21#include <linux/module.h>
22#include <linux/string.h>
23#include <linux/list.h>
24#include <linux/rculist.h>
25#include <linux/uaccess.h>
26
27#include <linux/kernel.h>
28#include <linux/spinlock.h>
29#include <linux/kthread.h>
30#include <linux/sched.h>
31#include <linux/slab.h>
32#include <linux/errno.h>
33#include <linux/jiffies.h>
34
35#include <linux/netdevice.h>
36#include <linux/net.h>
37#include <linux/inetdevice.h>
38#include <linux/skbuff.h>
39#include <linux/init.h>
40#include <linux/in.h>
41#include <linux/ip.h>
42#include <linux/udp.h>
43#include <linux/l2tp.h>
44#include <linux/hash.h>
45#include <linux/sort.h>
46#include <linux/file.h>
47#include <linux/nsproxy.h>
48#include <net/net_namespace.h>
49#include <net/netns/generic.h>
50#include <net/dst.h>
51#include <net/ip.h>
52#include <net/udp.h>
53#include <net/inet_common.h>
54#include <net/xfrm.h>
55#include <net/protocol.h>
56
57#include <asm/byteorder.h>
58#include <asm/atomic.h>
59
60#include "l2tp_core.h"
61
62#define L2TP_DRV_VERSION "V2.0"
63
64/* L2TP header constants */
65#define L2TP_HDRFLAG_T 0x8000
66#define L2TP_HDRFLAG_L 0x4000
67#define L2TP_HDRFLAG_S 0x0800
68#define L2TP_HDRFLAG_O 0x0200
69#define L2TP_HDRFLAG_P 0x0100
70
71#define L2TP_HDR_VER_MASK 0x000F
72#define L2TP_HDR_VER_2 0x0002
73#define L2TP_HDR_VER_3 0x0003
74
75/* L2TPv3 default L2-specific sublayer */
76#define L2TP_SLFLAG_S 0x40000000
77#define L2TP_SL_SEQ_MASK 0x00ffffff
78
79#define L2TP_HDR_SIZE_SEQ 10
80#define L2TP_HDR_SIZE_NOSEQ 6
81
82/* Default trace flags */
83#define L2TP_DEFAULT_DEBUG_FLAGS 0
84
85#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
86 do { \
87 if ((_mask) & (_type)) \
88 printk(_lvl "L2TP: " _fmt, ##args); \
89 } while (0)
90
91/* Private data stored for received packets in the skb.
92 */
93struct l2tp_skb_cb {
94 u32 ns;
95 u16 has_seq;
96 u16 length;
97 unsigned long expires;
98};
99
100#define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
101
102static atomic_t l2tp_tunnel_count;
103static atomic_t l2tp_session_count;
104
105/* per-net private data for this module */
106static unsigned int l2tp_net_id;
107struct l2tp_net {
108 struct list_head l2tp_tunnel_list;
109 spinlock_t l2tp_tunnel_list_lock;
110 struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
111 spinlock_t l2tp_session_hlist_lock;
112};
113
114static inline struct l2tp_net *l2tp_pernet(struct net *net)
115{
116 BUG_ON(!net);
117
118 return net_generic(net, l2tp_net_id);
119}
120
121/* Session hash global list for L2TPv3.
122 * The session_id SHOULD be random according to RFC3931, but several
123 * L2TP implementations use incrementing session_ids. So we do a real
124 * hash on the session_id, rather than a simple bitmask.
125 */
126static inline struct hlist_head *
127l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
128{
129 return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
130
131}
132
133/* Lookup a session by id in the global session list
134 */
135static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
136{
137 struct l2tp_net *pn = l2tp_pernet(net);
138 struct hlist_head *session_list =
139 l2tp_session_id_hash_2(pn, session_id);
140 struct l2tp_session *session;
141 struct hlist_node *walk;
142
143 rcu_read_lock_bh();
144 hlist_for_each_entry_rcu(session, walk, session_list, global_hlist) {
145 if (session->session_id == session_id) {
146 rcu_read_unlock_bh();
147 return session;
148 }
149 }
150 rcu_read_unlock_bh();
151
152 return NULL;
153}
154
155/* Session hash list.
156 * The session_id SHOULD be random according to RFC2661, but several
157 * L2TP implementations (Cisco and Microsoft) use incrementing
158 * session_ids. So we do a real hash on the session_id, rather than a
159 * simple bitmask.
160 */
161static inline struct hlist_head *
162l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
163{
164 return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
165}
166
167/* Lookup a session by id
168 */
169struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id)
170{
171 struct hlist_head *session_list;
172 struct l2tp_session *session;
173 struct hlist_node *walk;
174
175 /* In L2TPv3, session_ids are unique over all tunnels and we
176 * sometimes need to look them up before we know the
177 * tunnel.
178 */
179 if (tunnel == NULL)
180 return l2tp_session_find_2(net, session_id);
181
182 session_list = l2tp_session_id_hash(tunnel, session_id);
183 read_lock_bh(&tunnel->hlist_lock);
184 hlist_for_each_entry(session, walk, session_list, hlist) {
185 if (session->session_id == session_id) {
186 read_unlock_bh(&tunnel->hlist_lock);
187 return session;
188 }
189 }
190 read_unlock_bh(&tunnel->hlist_lock);
191
192 return NULL;
193}
194EXPORT_SYMBOL_GPL(l2tp_session_find);
195
196struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
197{
198 int hash;
199 struct hlist_node *walk;
200 struct l2tp_session *session;
201 int count = 0;
202
203 read_lock_bh(&tunnel->hlist_lock);
204 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
205 hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) {
206 if (++count > nth) {
207 read_unlock_bh(&tunnel->hlist_lock);
208 return session;
209 }
210 }
211 }
212
213 read_unlock_bh(&tunnel->hlist_lock);
214
215 return NULL;
216}
217EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
218
219/* Lookup a session by interface name.
220 * This is very inefficient but is only used by management interfaces.
221 */
222struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
223{
224 struct l2tp_net *pn = l2tp_pernet(net);
225 int hash;
226 struct hlist_node *walk;
227 struct l2tp_session *session;
228
229 rcu_read_lock_bh();
230 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
231 hlist_for_each_entry_rcu(session, walk, &pn->l2tp_session_hlist[hash], global_hlist) {
232 if (!strcmp(session->ifname, ifname)) {
233 rcu_read_unlock_bh();
234 return session;
235 }
236 }
237 }
238
239 rcu_read_unlock_bh();
240
241 return NULL;
242}
243EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
244
245/* Lookup a tunnel by id
246 */
247struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
248{
249 struct l2tp_tunnel *tunnel;
250 struct l2tp_net *pn = l2tp_pernet(net);
251
252 rcu_read_lock_bh();
253 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
254 if (tunnel->tunnel_id == tunnel_id) {
255 rcu_read_unlock_bh();
256 return tunnel;
257 }
258 }
259 rcu_read_unlock_bh();
260
261 return NULL;
262}
263EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
264
265struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth)
266{
267 struct l2tp_net *pn = l2tp_pernet(net);
268 struct l2tp_tunnel *tunnel;
269 int count = 0;
270
271 rcu_read_lock_bh();
272 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
273 if (++count > nth) {
274 rcu_read_unlock_bh();
275 return tunnel;
276 }
277 }
278
279 rcu_read_unlock_bh();
280
281 return NULL;
282}
283EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);
284
285/*****************************************************************************
286 * Receive data handling
287 *****************************************************************************/
288
289/* Queue a skb in order. We come here only if the skb has an L2TP sequence
290 * number.
291 */
292static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
293{
294 struct sk_buff *skbp;
295 struct sk_buff *tmp;
296 u32 ns = L2TP_SKB_CB(skb)->ns;
297
298 spin_lock_bh(&session->reorder_q.lock);
299 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
300 if (L2TP_SKB_CB(skbp)->ns > ns) {
301 __skb_queue_before(&session->reorder_q, skbp, skb);
302 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
303 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
304 session->name, ns, L2TP_SKB_CB(skbp)->ns,
305 skb_queue_len(&session->reorder_q));
306 session->stats.rx_oos_packets++;
307 goto out;
308 }
309 }
310
311 __skb_queue_tail(&session->reorder_q, skb);
312
313out:
314 spin_unlock_bh(&session->reorder_q.lock);
315}
316
317/* Dequeue a single skb.
318 */
319static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
320{
321 struct l2tp_tunnel *tunnel = session->tunnel;
322 int length = L2TP_SKB_CB(skb)->length;
323
324 /* We're about to requeue the skb, so return resources
325 * to its current owner (a socket receive buffer).
326 */
327 skb_orphan(skb);
328
329 tunnel->stats.rx_packets++;
330 tunnel->stats.rx_bytes += length;
331 session->stats.rx_packets++;
332 session->stats.rx_bytes += length;
333
334 if (L2TP_SKB_CB(skb)->has_seq) {
335 /* Bump our Nr */
336 session->nr++;
337 if (tunnel->version == L2TP_HDR_VER_2)
338 session->nr &= 0xffff;
339 else
340 session->nr &= 0xffffff;
341
342 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
343 "%s: updated nr to %hu\n", session->name, session->nr);
344 }
345
346 /* call private receive handler */
347 if (session->recv_skb != NULL)
348 (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
349 else
350 kfree_skb(skb);
351
352 if (session->deref)
353 (*session->deref)(session);
354}
355
356/* Dequeue skbs from the session's reorder_q, subject to packet order.
357 * Skbs that have been in the queue for too long are simply discarded.
358 */
359static void l2tp_recv_dequeue(struct l2tp_session *session)
360{
361 struct sk_buff *skb;
362 struct sk_buff *tmp;
363
364 /* If the pkt at the head of the queue has the nr that we
365 * expect to send up next, dequeue it and any other
366 * in-sequence packets behind it.
367 */
368 spin_lock_bh(&session->reorder_q.lock);
369 skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
370 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
371 session->stats.rx_seq_discards++;
372 session->stats.rx_errors++;
373 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
374 "%s: oos pkt %u len %d discarded (too old), "
375 "waiting for %u, reorder_q_len=%d\n",
376 session->name, L2TP_SKB_CB(skb)->ns,
377 L2TP_SKB_CB(skb)->length, session->nr,
378 skb_queue_len(&session->reorder_q));
379 __skb_unlink(skb, &session->reorder_q);
380 kfree_skb(skb);
381 if (session->deref)
382 (*session->deref)(session);
383 continue;
384 }
385
386 if (L2TP_SKB_CB(skb)->has_seq) {
387 if (L2TP_SKB_CB(skb)->ns != session->nr) {
388 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
389 "%s: holding oos pkt %u len %d, "
390 "waiting for %u, reorder_q_len=%d\n",
391 session->name, L2TP_SKB_CB(skb)->ns,
392 L2TP_SKB_CB(skb)->length, session->nr,
393 skb_queue_len(&session->reorder_q));
394 goto out;
395 }
396 }
397 __skb_unlink(skb, &session->reorder_q);
398
399 /* Process the skb. We release the queue lock while we
400 * do so to let other contexts process the queue.
401 */
402 spin_unlock_bh(&session->reorder_q.lock);
403 l2tp_recv_dequeue_skb(session, skb);
404 spin_lock_bh(&session->reorder_q.lock);
405 }
406
407out:
408 spin_unlock_bh(&session->reorder_q.lock);
409}
410
411static inline int l2tp_verify_udp_checksum(struct sock *sk,
412 struct sk_buff *skb)
413{
414 struct udphdr *uh = udp_hdr(skb);
415 u16 ulen = ntohs(uh->len);
416 struct inet_sock *inet;
417 __wsum psum;
418
419 if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check)
420 return 0;
421
422 inet = inet_sk(sk);
423 psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen,
424 IPPROTO_UDP, 0);
425
426 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
427 !csum_fold(csum_add(psum, skb->csum)))
428 return 0;
429
430 skb->csum = psum;
431
432 return __skb_checksum_complete(skb);
433}
434
435/* Do receive processing of L2TP data frames. We handle both L2TPv2
436 * and L2TPv3 data frames here.
437 *
438 * L2TPv2 Data Message Header
439 *
440 * 0 1 2 3
441 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
442 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
443 * |T|L|x|x|S|x|O|P|x|x|x|x| Ver | Length (opt) |
444 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
445 * | Tunnel ID | Session ID |
446 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
447 * | Ns (opt) | Nr (opt) |
448 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
449 * | Offset Size (opt) | Offset pad... (opt)
450 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
451 *
452 * Data frames are marked by T=0. All other fields are the same as
453 * those in L2TP control frames.
454 *
455 * L2TPv3 Data Message Header
456 *
457 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
458 * | L2TP Session Header |
459 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
460 * | L2-Specific Sublayer |
461 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
462 * | Tunnel Payload ...
463 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
464 *
465 * L2TPv3 Session Header Over IP
466 *
467 * 0 1 2 3
468 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
469 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
470 * | Session ID |
471 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
472 * | Cookie (optional, maximum 64 bits)...
473 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
474 * |
475 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
476 *
477 * L2TPv3 L2-Specific Sublayer Format
478 *
479 * 0 1 2 3
480 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
481 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
482 * |x|S|x|x|x|x|x|x| Sequence Number |
483 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
484 *
485 * Cookie value, sublayer format and offset (pad) are negotiated with
486 * the peer when the session is set up. Unlike L2TPv2, we do not need
487 * to parse the packet header to determine if optional fields are
488 * present.
489 *
490 * Caller must already have parsed the frame and determined that it is
491 * a data (not control) frame before coming here. Fields up to the
492 * session-id have already been parsed and ptr points to the data
493 * after the session-id.
494 */
495void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
496 unsigned char *ptr, unsigned char *optr, u16 hdrflags,
497 int length, int (*payload_hook)(struct sk_buff *skb))
498{
499 struct l2tp_tunnel *tunnel = session->tunnel;
500 int offset;
501 u32 ns, nr;
502
503 /* The ref count is increased since we now hold a pointer to
504 * the session. Take care to decrement the refcnt when exiting
505 * this function from now on...
506 */
507 l2tp_session_inc_refcount(session);
508 if (session->ref)
509 (*session->ref)(session);
510
511 /* Parse and check optional cookie */
512 if (session->peer_cookie_len > 0) {
513 if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
514 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
515 "%s: cookie mismatch (%u/%u). Discarding.\n",
516 tunnel->name, tunnel->tunnel_id, session->session_id);
517 session->stats.rx_cookie_discards++;
518 goto discard;
519 }
520 ptr += session->peer_cookie_len;
521 }
522
523 /* Handle the optional sequence numbers. Sequence numbers are
524 * in different places for L2TPv2 and L2TPv3.
525 *
526 * If we are the LAC, enable/disable sequence numbers under
527 * the control of the LNS. If no sequence numbers present but
528 * we were expecting them, discard frame.
529 */
530 ns = nr = 0;
531 L2TP_SKB_CB(skb)->has_seq = 0;
532 if (tunnel->version == L2TP_HDR_VER_2) {
533 if (hdrflags & L2TP_HDRFLAG_S) {
534 ns = ntohs(*(__be16 *) ptr);
535 ptr += 2;
536 nr = ntohs(*(__be16 *) ptr);
537 ptr += 2;
538
539 /* Store L2TP info in the skb */
540 L2TP_SKB_CB(skb)->ns = ns;
541 L2TP_SKB_CB(skb)->has_seq = 1;
542
543 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
544 "%s: recv data ns=%u, nr=%u, session nr=%u\n",
545 session->name, ns, nr, session->nr);
546 }
547 } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
548 u32 l2h = ntohl(*(__be32 *) ptr);
549
550 if (l2h & 0x40000000) {
551 ns = l2h & 0x00ffffff;
552
553 /* Store L2TP info in the skb */
554 L2TP_SKB_CB(skb)->ns = ns;
555 L2TP_SKB_CB(skb)->has_seq = 1;
556
557 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
558 "%s: recv data ns=%u, session nr=%u\n",
559 session->name, ns, session->nr);
560 }
561 }
562
563 /* Advance past L2-specific header, if present */
564 ptr += session->l2specific_len;
565
566 if (L2TP_SKB_CB(skb)->has_seq) {
567 /* Received a packet with sequence numbers. If we're the LNS,
568 * check if we sre sending sequence numbers and if not,
569 * configure it so.
570 */
571 if ((!session->lns_mode) && (!session->send_seq)) {
572 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO,
573 "%s: requested to enable seq numbers by LNS\n",
574 session->name);
575 session->send_seq = -1;
576 l2tp_session_set_header_len(session, tunnel->version);
577 }
578 } else {
579 /* No sequence numbers.
580 * If user has configured mandatory sequence numbers, discard.
581 */
582 if (session->recv_seq) {
583 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
584 "%s: recv data has no seq numbers when required. "
585 "Discarding\n", session->name);
586 session->stats.rx_seq_discards++;
587 goto discard;
588 }
589
590 /* If we're the LAC and we're sending sequence numbers, the
591 * LNS has requested that we no longer send sequence numbers.
592 * If we're the LNS and we're sending sequence numbers, the
593 * LAC is broken. Discard the frame.
594 */
595 if ((!session->lns_mode) && (session->send_seq)) {
596 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO,
597 "%s: requested to disable seq numbers by LNS\n",
598 session->name);
599 session->send_seq = 0;
600 l2tp_session_set_header_len(session, tunnel->version);
601 } else if (session->send_seq) {
602 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
603 "%s: recv data has no seq numbers when required. "
604 "Discarding\n", session->name);
605 session->stats.rx_seq_discards++;
606 goto discard;
607 }
608 }
609
610 /* Session data offset is handled differently for L2TPv2 and
611 * L2TPv3. For L2TPv2, there is an optional 16-bit value in
612 * the header. For L2TPv3, the offset is negotiated using AVPs
613 * in the session setup control protocol.
614 */
615 if (tunnel->version == L2TP_HDR_VER_2) {
616 /* If offset bit set, skip it. */
617 if (hdrflags & L2TP_HDRFLAG_O) {
618 offset = ntohs(*(__be16 *)ptr);
619 ptr += 2 + offset;
620 }
621 } else
622 ptr += session->offset;
623
624 offset = ptr - optr;
625 if (!pskb_may_pull(skb, offset))
626 goto discard;
627
628 __skb_pull(skb, offset);
629
630 /* If caller wants to process the payload before we queue the
631 * packet, do so now.
632 */
633 if (payload_hook)
634 if ((*payload_hook)(skb))
635 goto discard;
636
637 /* Prepare skb for adding to the session's reorder_q. Hold
638 * packets for max reorder_timeout or 1 second if not
639 * reordering.
640 */
641 L2TP_SKB_CB(skb)->length = length;
642 L2TP_SKB_CB(skb)->expires = jiffies +
643 (session->reorder_timeout ? session->reorder_timeout : HZ);
644
645 /* Add packet to the session's receive queue. Reordering is done here, if
646 * enabled. Saved L2TP protocol info is stored in skb->sb[].
647 */
648 if (L2TP_SKB_CB(skb)->has_seq) {
649 if (session->reorder_timeout != 0) {
650 /* Packet reordering enabled. Add skb to session's
651 * reorder queue, in order of ns.
652 */
653 l2tp_recv_queue_skb(session, skb);
654 } else {
655 /* Packet reordering disabled. Discard out-of-sequence
656 * packets
657 */
658 if (L2TP_SKB_CB(skb)->ns != session->nr) {
659 session->stats.rx_seq_discards++;
660 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
661 "%s: oos pkt %u len %d discarded, "
662 "waiting for %u, reorder_q_len=%d\n",
663 session->name, L2TP_SKB_CB(skb)->ns,
664 L2TP_SKB_CB(skb)->length, session->nr,
665 skb_queue_len(&session->reorder_q));
666 goto discard;
667 }
668 skb_queue_tail(&session->reorder_q, skb);
669 }
670 } else {
671 /* No sequence numbers. Add the skb to the tail of the
672 * reorder queue. This ensures that it will be
673 * delivered after all previous sequenced skbs.
674 */
675 skb_queue_tail(&session->reorder_q, skb);
676 }
677
678 /* Try to dequeue as many skbs from reorder_q as we can. */
679 l2tp_recv_dequeue(session);
680
681 l2tp_session_dec_refcount(session);
682
683 return;
684
685discard:
686 session->stats.rx_errors++;
687 kfree_skb(skb);
688
689 if (session->deref)
690 (*session->deref)(session);
691
692 l2tp_session_dec_refcount(session);
693}
694EXPORT_SYMBOL(l2tp_recv_common);
695
696/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
697 * here. The skb is not on a list when we get here.
698 * Returns 0 if the packet was a data packet and was successfully passed on.
699 * Returns 1 if the packet was not a good data packet and could not be
700 * forwarded. All such packets are passed up to userspace to deal with.
701 */
702int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
703 int (*payload_hook)(struct sk_buff *skb))
704{
705 struct l2tp_session *session = NULL;
706 unsigned char *ptr, *optr;
707 u16 hdrflags;
708 u32 tunnel_id, session_id;
709 int offset;
710 u16 version;
711 int length;
712
713 if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
714 goto discard_bad_csum;
715
716 /* UDP always verifies the packet length. */
717 __skb_pull(skb, sizeof(struct udphdr));
718
719 /* Short packet? */
720 if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
721 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
722 "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
723 goto error;
724 }
725
726 /* Point to L2TP header */
727 optr = ptr = skb->data;
728
729 /* Trace packet contents, if enabled */
730 if (tunnel->debug & L2TP_MSG_DATA) {
731 length = min(32u, skb->len);
732 if (!pskb_may_pull(skb, length))
733 goto error;
734
735 printk(KERN_DEBUG "%s: recv: ", tunnel->name);
736
737 offset = 0;
738 do {
739 printk(" %02X", ptr[offset]);
740 } while (++offset < length);
741
742 printk("\n");
743 }
744
745 /* Get L2TP header flags */
746 hdrflags = ntohs(*(__be16 *) ptr);
747
748 /* Check protocol version */
749 version = hdrflags & L2TP_HDR_VER_MASK;
750 if (version != tunnel->version) {
751 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
752 "%s: recv protocol version mismatch: got %d expected %d\n",
753 tunnel->name, version, tunnel->version);
754 goto error;
755 }
756
757 /* Get length of L2TP packet */
758 length = skb->len;
759
760 /* If type is control packet, it is handled by userspace. */
761 if (hdrflags & L2TP_HDRFLAG_T) {
762 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG,
763 "%s: recv control packet, len=%d\n", tunnel->name, length);
764 goto error;
765 }
766
767 /* Skip flags */
768 ptr += 2;
769
770 if (tunnel->version == L2TP_HDR_VER_2) {
771 /* If length is present, skip it */
772 if (hdrflags & L2TP_HDRFLAG_L)
773 ptr += 2;
774
775 /* Extract tunnel and session ID */
776 tunnel_id = ntohs(*(__be16 *) ptr);
777 ptr += 2;
778 session_id = ntohs(*(__be16 *) ptr);
779 ptr += 2;
780 } else {
781 ptr += 2; /* skip reserved bits */
782 tunnel_id = tunnel->tunnel_id;
783 session_id = ntohl(*(__be32 *) ptr);
784 ptr += 4;
785 }
786
787 /* Find the session context */
788 session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
789 if (!session || !session->recv_skb) {
790 /* Not found? Pass to userspace to deal with */
791 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
792 "%s: no session found (%u/%u). Passing up.\n",
793 tunnel->name, tunnel_id, session_id);
794 goto error;
795 }
796
797 l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
798
799 return 0;
800
801discard_bad_csum:
802 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
803 UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
804 tunnel->stats.rx_errors++;
805 kfree_skb(skb);
806
807 return 0;
808
809error:
810 /* Put UDP header back */
811 __skb_push(skb, sizeof(struct udphdr));
812
813 return 1;
814}
815EXPORT_SYMBOL_GPL(l2tp_udp_recv_core);
816
817/* UDP encapsulation receive handler. See net/ipv4/udp.c.
818 * Return codes:
819 * 0 : success.
820 * <0: error
821 * >0: skb should be passed up to userspace as UDP.
822 */
823int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
824{
825 struct l2tp_tunnel *tunnel;
826
827 tunnel = l2tp_sock_to_tunnel(sk);
828 if (tunnel == NULL)
829 goto pass_up;
830
831 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG,
832 "%s: received %d bytes\n", tunnel->name, skb->len);
833
834 if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
835 goto pass_up_put;
836
837 sock_put(sk);
838 return 0;
839
840pass_up_put:
841 sock_put(sk);
842pass_up:
843 return 1;
844}
845EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
846
847/************************************************************************
848 * Transmit handling
849 ***********************************************************************/
850
851/* Build an L2TP header for the session into the buffer provided.
852 */
853static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
854{
855 struct l2tp_tunnel *tunnel = session->tunnel;
856 __be16 *bufp = buf;
857 __be16 *optr = buf;
858 u16 flags = L2TP_HDR_VER_2;
859 u32 tunnel_id = tunnel->peer_tunnel_id;
860 u32 session_id = session->peer_session_id;
861
862 if (session->send_seq)
863 flags |= L2TP_HDRFLAG_S;
864
865 /* Setup L2TP header. */
866 *bufp++ = htons(flags);
867 *bufp++ = htons(tunnel_id);
868 *bufp++ = htons(session_id);
869 if (session->send_seq) {
870 *bufp++ = htons(session->ns);
871 *bufp++ = 0;
872 session->ns++;
873 session->ns &= 0xffff;
874 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
875 "%s: updated ns to %u\n", session->name, session->ns);
876 }
877
878 return bufp - optr;
879}
880
/* Build an L2TPv3 session header into buf.
 *
 * Layout written (network byte order):
 *   - UDP encapsulation only: 2 bytes flags/version + 2 reserved bytes,
 *   - 4-byte peer session id,
 *   - optional cookie (session->cookie_len bytes),
 *   - optional L2-specific sublayer (session->l2specific_len bytes);
 *     for the DEFAULT type the first 32-bit word carries the S bit
 *     (0x40000000) and a 24-bit ns, matching what the receive path
 *     parses,
 *   - optional pad of session->offset bytes before the payload.
 *
 * Advances session->ns modulo 2^24 when sequencing is enabled.
 * Returns the number of bytes written.
 */
static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	char *bufp = buf;
	char *optr = bufp;

	/* Setup L2TP header. The header differs slightly for UDP and
	 * IP encapsulations. For UDP, there is 4 bytes of flags.
	 */
	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		u16 flags = L2TP_HDR_VER_3;
		*((__be16 *) bufp) = htons(flags);
		bufp += 2;
		*((__be16 *) bufp) = 0;
		bufp += 2;
	}

	*((__be32 *) bufp) = htonl(session->peer_session_id);
	bufp += 4;
	if (session->cookie_len) {
		memcpy(bufp, &session->cookie[0], session->cookie_len);
		bufp += session->cookie_len;
	}
	if (session->l2specific_len) {
		if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
			u32 l2h = 0;
			if (session->send_seq) {
				/* S bit + 24-bit ns, as parsed by the
				 * receive path. */
				l2h = 0x40000000 | session->ns;
				session->ns++;
				session->ns &= 0xffffff;
				PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
				       "%s: updated ns to %u\n", session->name, session->ns);
			}

			*((__be32 *) bufp) = htonl(l2h);
		}
		/* NOTE(review): the DEFAULT word above assumes
		 * l2specific_len >= 4 — confirm the configured length is
		 * validated by the control path.
		 */
		bufp += session->l2specific_len;
	}
	if (session->offset)
		bufp += session->offset;

	return bufp - optr;
}
924
925int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len)
926{
927 struct l2tp_tunnel *tunnel = session->tunnel;
928 unsigned int len = skb->len;
929 int error;
930
931 /* Debug */
932 if (session->send_seq)
933 PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG,
934 "%s: send %Zd bytes, ns=%u\n", session->name,
935 data_len, session->ns - 1);
936 else
937 PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG,
938 "%s: send %Zd bytes\n", session->name, data_len);
939
940 if (session->debug & L2TP_MSG_DATA) {
941 int i;
942 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
943 unsigned char *datap = skb->data + uhlen;
944
945 printk(KERN_DEBUG "%s: xmit:", session->name);
946 for (i = 0; i < (len - uhlen); i++) {
947 printk(" %02X", *datap++);
948 if (i == 31) {
949 printk(" ...");
950 break;
951 }
952 }
953 printk("\n");
954 }
955
956 /* Queue the packet to IP for output */
957 skb->local_df = 1;
958 error = ip_queue_xmit(skb);
959
960 /* Update stats */
961 if (error >= 0) {
962 tunnel->stats.tx_packets++;
963 tunnel->stats.tx_bytes += len;
964 session->stats.tx_packets++;
965 session->stats.tx_bytes += len;
966 } else {
967 tunnel->stats.tx_errors++;
968 session->stats.tx_errors++;
969 }
970
971 return 0;
972}
973EXPORT_SYMBOL_GPL(l2tp_xmit_core);
974
975/* Automatically called when the skb is freed.
976 */
977static void l2tp_sock_wfree(struct sk_buff *skb)
978{
979 sock_put(skb->sk);
980}
981
982/* For data skbs that we transmit, we associate with the tunnel socket
983 * but don't do accounting.
984 */
985static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
986{
987 sock_hold(sk);
988 skb->sk = sk;
989 skb->destructor = l2tp_sock_wfree;
990}
991
992/* If caller requires the skb to have a ppp header, the header must be
993 * inserted in the skb data before calling this function.
994 */
995int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len)
996{
997 int data_len = skb->len;
998 struct l2tp_tunnel *tunnel = session->tunnel;
999 struct sock *sk = tunnel->sock;
1000 struct udphdr *uh;
1001 struct inet_sock *inet;
1002 __wsum csum;
1003 int old_headroom;
1004 int new_headroom;
1005 int headroom;
1006 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1007 int udp_len;
1008
1009 /* Check that there's enough headroom in the skb to insert IP,
1010 * UDP and L2TP headers. If not enough, expand it to
1011 * make room. Adjust truesize.
1012 */
1013 headroom = NET_SKB_PAD + sizeof(struct iphdr) +
1014 uhlen + hdr_len;
1015 old_headroom = skb_headroom(skb);
1016 if (skb_cow_head(skb, headroom))
1017 goto abort;
1018
1019 new_headroom = skb_headroom(skb);
1020 skb_orphan(skb);
1021 skb->truesize += new_headroom - old_headroom;
1022
1023 /* Setup L2TP header */
1024 session->build_header(session, __skb_push(skb, hdr_len));
1025
1026 /* Reset skb netfilter state */
1027 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1028 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
1029 IPSKB_REROUTED);
1030 nf_reset(skb);
1031
1032 /* Get routing info from the tunnel socket */
1033 skb_dst_drop(skb);
1034 skb_dst_set(skb, dst_clone(__sk_dst_get(sk)));
1035
1036 switch (tunnel->encap) {
1037 case L2TP_ENCAPTYPE_UDP:
1038 /* Setup UDP header */
1039 inet = inet_sk(sk);
1040 __skb_push(skb, sizeof(*uh));
1041 skb_reset_transport_header(skb);
1042 uh = udp_hdr(skb);
1043 uh->source = inet->inet_sport;
1044 uh->dest = inet->inet_dport;
1045 udp_len = uhlen + hdr_len + data_len;
1046 uh->len = htons(udp_len);
1047 uh->check = 0;
1048
1049 /* Calculate UDP checksum if configured to do so */
1050 if (sk->sk_no_check == UDP_CSUM_NOXMIT)
1051 skb->ip_summed = CHECKSUM_NONE;
1052 else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
1053 (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
1054 skb->ip_summed = CHECKSUM_COMPLETE;
1055 csum = skb_checksum(skb, 0, udp_len, 0);
1056 uh->check = csum_tcpudp_magic(inet->inet_saddr,
1057 inet->inet_daddr,
1058 udp_len, IPPROTO_UDP, csum);
1059 if (uh->check == 0)
1060 uh->check = CSUM_MANGLED_0;
1061 } else {
1062 skb->ip_summed = CHECKSUM_PARTIAL;
1063 skb->csum_start = skb_transport_header(skb) - skb->head;
1064 skb->csum_offset = offsetof(struct udphdr, check);
1065 uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
1066 inet->inet_daddr,
1067 udp_len, IPPROTO_UDP, 0);
1068 }
1069 break;
1070
1071 case L2TP_ENCAPTYPE_IP:
1072 break;
1073 }
1074
1075 l2tp_skb_set_owner_w(skb, sk);
1076
1077 l2tp_xmit_core(session, skb, data_len);
1078
1079abort:
1080 return 0;
1081}
1082EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1083
1084/*****************************************************************************
1085 * Tinnel and session create/destroy.
1086 *****************************************************************************/
1087
/* Tunnel socket destruct hook.
 * The tunnel context is deleted only when all session sockets have been
 * closed.
 *
 * Runs when the tunnel socket is destroyed: closes all sessions,
 * removes the UDP encapsulation callbacks, restores and invokes the
 * socket's original destructor, then drops the tunnel's reference so it
 * can be freed.
 */
void l2tp_tunnel_destruct(struct sock *sk)
{
	struct l2tp_tunnel *tunnel;

	tunnel = sk->sk_user_data;
	if (tunnel == NULL)
		goto end;

	PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
	       "%s: closing...\n", tunnel->name);

	/* Close all sessions */
	l2tp_tunnel_closeall(tunnel);

	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* No longer an encapsulation socket. See net/ipv4/udp.c */
		(udp_sk(sk))->encap_type = 0;
		(udp_sk(sk))->encap_rcv = NULL;
		break;
	case L2TP_ENCAPTYPE_IP:
		break;
	}

	/* Remove hooks into tunnel socket */
	tunnel->sock = NULL;
	sk->sk_destruct = tunnel->old_sk_destruct;
	sk->sk_user_data = NULL;

	/* Call the original destructor */
	if (sk->sk_destruct)
		(*sk->sk_destruct)(sk);

	/* We're finished with the socket */
	l2tp_tunnel_dec_refcount(tunnel);

end:
	return;
}
1131EXPORT_SYMBOL(l2tp_tunnel_destruct);
1132
/* When the tunnel is closed, all the attached sessions need to go too.
 *
 * Walks every hash chain of the tunnel's session table, unlinking each
 * session and invoking its close/deref callbacks. hlist_lock must be
 * dropped around the callbacks (they may sleep or take other locks), so
 * after each session the walk restarts from the head of the chain;
 * forward progress is guaranteed because a session is always removed
 * first.
 */
void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
	int hash;
	struct hlist_node *walk;
	struct hlist_node *tmp;
	struct l2tp_session *session;

	BUG_ON(tunnel == NULL);

	PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
	       "%s: closing all sessions...\n", tunnel->name);

	write_lock_bh(&tunnel->hlist_lock);
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
again:
		hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
			session = hlist_entry(walk, struct l2tp_session, hlist);

			PRINTK(session->debug, L2TP_MSG_CONTROL, KERN_INFO,
			       "%s: closing session\n", session->name);

			hlist_del_init(&session->hlist);

			/* Since we should hold the sock lock while
			 * doing any unbinding, we need to release the
			 * lock we're holding before taking that lock.
			 * Hold a reference to the sock so it doesn't
			 * disappear as we're jumping between locks.
			 */
			if (session->ref != NULL)
				(*session->ref)(session);

			write_unlock_bh(&tunnel->hlist_lock);

			/* L2TPv3 sessions also live in the pernet hash;
			 * unlink from there too. */
			if (tunnel->version != L2TP_HDR_VER_2) {
				struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

				spin_lock_bh(&pn->l2tp_session_hlist_lock);
				hlist_del_init_rcu(&session->global_hlist);
				spin_unlock_bh(&pn->l2tp_session_hlist_lock);
				synchronize_rcu();
			}

			if (session->session_close != NULL)
				(*session->session_close)(session);

			if (session->deref != NULL)
				(*session->deref)(session);

			write_lock_bh(&tunnel->hlist_lock);

			/* Now restart from the beginning of this hash
			 * chain. We always remove a session from the
			 * list so we are guaranteed to make forward
			 * progress.
			 */
			goto again;
		}
	}
	write_unlock_bh(&tunnel->hlist_lock);
}
1196EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
1197
/* Really kill the tunnel.
 * Come here only when all sessions have been cleared from the tunnel.
 *
 * Caller must guarantee the refcount is zero and the socket is already
 * detached (both BUG_ON'd below). Unlinks the tunnel from the pernet
 * list under the list lock, waits for RCU readers, then frees it.
 */
void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
	struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

	BUG_ON(atomic_read(&tunnel->ref_count) != 0);
	BUG_ON(tunnel->sock != NULL);

	PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
	       "%s: free...\n", tunnel->name);

	/* Remove from tunnel list */
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_del_rcu(&tunnel->list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
	/* Wait for any RCU walkers of the tunnel list before freeing */
	synchronize_rcu();

	atomic_dec(&l2tp_tunnel_count);
	kfree(tunnel);
}
1220EXPORT_SYMBOL_GPL(l2tp_tunnel_free);
1221
/* Create a socket for the tunnel, if one isn't set up by
 * userspace. This is used for static tunnels where there is no
 * managing L2TP daemon.
 *
 * For UDP encapsulation, creates a UDP socket bound to the configured
 * local address/port and connected to the peer; for IP encapsulation,
 * creates an IPPROTO_L2TP socket bound/connected with the L2TP
 * connection ids. On success *sockp holds the new socket and 0 is
 * returned; on any failure the socket (if created) is released, *sockp
 * is reset to NULL and a negative errno is returned.
 */
static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct socket **sockp)
{
	int err = -EINVAL;
	struct sockaddr_in udp_addr;
	struct sockaddr_l2tpip ip_addr;
	struct socket *sock = NULL;

	switch (cfg->encap) {
	case L2TP_ENCAPTYPE_UDP:
		err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp);
		if (err < 0)
			goto out;

		sock = *sockp;

		/* Bind the local end */
		memset(&udp_addr, 0, sizeof(udp_addr));
		udp_addr.sin_family = AF_INET;
		udp_addr.sin_addr = cfg->local_ip;
		udp_addr.sin_port = htons(cfg->local_udp_port);
		err = kernel_bind(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr));
		if (err < 0)
			goto out;

		/* Connect to the peer */
		udp_addr.sin_family = AF_INET;
		udp_addr.sin_addr = cfg->peer_ip;
		udp_addr.sin_port = htons(cfg->peer_udp_port);
		err = kernel_connect(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr), 0);
		if (err < 0)
			goto out;

		/* Disable transmit checksums if not requested */
		if (!cfg->use_udp_checksums)
			sock->sk->sk_no_check = UDP_CSUM_NOXMIT;

		break;

	case L2TP_ENCAPTYPE_IP:
		err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP, sockp);
		if (err < 0)
			goto out;

		sock = *sockp;

		/* Bind the local end using the L2TPIP address family */
		memset(&ip_addr, 0, sizeof(ip_addr));
		ip_addr.l2tp_family = AF_INET;
		ip_addr.l2tp_addr = cfg->local_ip;
		ip_addr.l2tp_conn_id = tunnel_id;
		err = kernel_bind(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr));
		if (err < 0)
			goto out;

		/* Connect to the peer */
		ip_addr.l2tp_family = AF_INET;
		ip_addr.l2tp_addr = cfg->peer_ip;
		ip_addr.l2tp_conn_id = peer_tunnel_id;
		err = kernel_connect(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr), 0);
		if (err < 0)
			goto out;

		break;

	default:
		/* Unknown encapsulation type: err is still -EINVAL */
		goto out;
	}

out:
	/* On error, don't leak a partially-created socket */
	if ((err < 0) && sock) {
		sock_release(sock);
		*sockp = NULL;
	}

	return err;
}
1297
/* Create a tunnel context and attach it to a tunnel socket.
 *
 * If fd >= 0 the socket was opened by the userspace L2TP daemon and is
 * looked up from the fd; otherwise a kernel socket is created from cfg.
 * The socket is sanity-checked against the requested encapsulation,
 * marked as an L2TP encap socket (UDP case), hooked via sk_user_data
 * and sk_destruct, and added to the pernet tunnel list with an initial
 * reference. Returns 0 and sets *tunnelp on success, negative errno on
 * failure (*tunnelp may be set to NULL or a partial tunnel — callers
 * should only use it when 0 is returned).
 *
 * NOTE(review): if the socket was created by the kernel (fd < 0) and a
 * later sanity check fails, the error path below does not release it —
 * sockfd_put() only applies to sockets that have a file. Verify this
 * path for a socket leak.
 */
int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
{
	struct l2tp_tunnel *tunnel = NULL;
	int err;
	struct socket *sock = NULL;
	struct sock *sk = NULL;
	struct l2tp_net *pn;
	enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;

	/* Get the tunnel socket from the fd, which was opened by
	 * the userspace L2TP daemon. If not specified, create a
	 * kernel socket.
	 */
	if (fd < 0) {
		err = l2tp_tunnel_sock_create(tunnel_id, peer_tunnel_id, cfg, &sock);
		if (err < 0)
			goto err;
	} else {
		err = -EBADF;
		sock = sockfd_lookup(fd, &err);
		if (!sock) {
			printk(KERN_ERR "tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
			       tunnel_id, fd, err);
			goto err;
		}
	}

	sk = sock->sk;

	if (cfg != NULL)
		encap = cfg->encap;

	/* Quick sanity checks */
	switch (encap) {
	case L2TP_ENCAPTYPE_UDP:
		err = -EPROTONOSUPPORT;
		if (sk->sk_protocol != IPPROTO_UDP) {
			printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
			       tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
			goto err;
		}
		break;
	case L2TP_ENCAPTYPE_IP:
		err = -EPROTONOSUPPORT;
		if (sk->sk_protocol != IPPROTO_L2TP) {
			printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
			       tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
			goto err;
		}
		break;
	}

	/* Check if this socket has already been prepped */
	tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
	if (tunnel != NULL) {
		/* This socket has already been prepped */
		err = -EBUSY;
		goto err;
	}

	tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
	if (tunnel == NULL) {
		err = -ENOMEM;
		goto err;
	}

	tunnel->version = version;
	tunnel->tunnel_id = tunnel_id;
	tunnel->peer_tunnel_id = peer_tunnel_id;
	tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS;

	tunnel->magic = L2TP_TUNNEL_MAGIC;
	sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
	rwlock_init(&tunnel->hlist_lock);

	/* The net we belong to */
	tunnel->l2tp_net = net;
	pn = l2tp_pernet(net);

	/* Override default debug flags with caller's, if given */
	if (cfg != NULL)
		tunnel->debug = cfg->debug;

	/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
	tunnel->encap = encap;
	if (encap == L2TP_ENCAPTYPE_UDP) {
		/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
		udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
		udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
	}

	sk->sk_user_data = tunnel;

	/* Hook on the tunnel socket destructor so that we can cleanup
	 * if the tunnel socket goes away.
	 */
	tunnel->old_sk_destruct = sk->sk_destruct;
	sk->sk_destruct = &l2tp_tunnel_destruct;
	tunnel->sock = sk;
	sk->sk_allocation = GFP_ATOMIC;

	/* Add tunnel to our list */
	INIT_LIST_HEAD(&tunnel->list);
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
	synchronize_rcu();
	atomic_inc(&l2tp_tunnel_count);

	/* Bump the reference count. The tunnel context is deleted
	 * only when this drops to zero.
	 */
	l2tp_tunnel_inc_refcount(tunnel);

	err = 0;
err:
	if (tunnelp)
		*tunnelp = tunnel;

	/* If tunnel's socket was created by the kernel, it doesn't
	 * have a file.
	 */
	if (sock && sock->file)
		sockfd_put(sock);

	return err;
}
1424EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1425
1426/* This function is used by the netlink TUNNEL_DELETE command.
1427 */
1428int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1429{
1430 int err = 0;
1431 struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL;
1432
1433 /* Force the tunnel socket to close. This will eventually
1434 * cause the tunnel to be deleted via the normal socket close
1435 * mechanisms when userspace closes the tunnel socket.
1436 */
1437 if (sock != NULL) {
1438 err = inet_shutdown(sock, 2);
1439
1440 /* If the tunnel's socket was created by the kernel,
1441 * close the socket here since the socket was not
1442 * created by userspace.
1443 */
1444 if (sock->file == NULL)
1445 err = inet_release(sock);
1446 }
1447
1448 return err;
1449}
1450EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1451
/* Really kill the session.
 *
 * Called when the session refcount reaches zero (BUG_ON'd below).
 * Unlinks the session from the tunnel hash (and, for L2TPv3, the
 * pernet hash), releases the hold on the tunnel socket and drops the
 * reference the session held on its tunnel, then frees the memory.
 */
void l2tp_session_free(struct l2tp_session *session)
{
	struct l2tp_tunnel *tunnel;

	BUG_ON(atomic_read(&session->ref_count) != 0);

	tunnel = session->tunnel;
	if (tunnel != NULL) {
		BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);

		/* Delete the session from the hash */
		write_lock_bh(&tunnel->hlist_lock);
		hlist_del_init(&session->hlist);
		write_unlock_bh(&tunnel->hlist_lock);

		/* Unlink from the global hash if not L2TPv2 */
		if (tunnel->version != L2TP_HDR_VER_2) {
			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

			spin_lock_bh(&pn->l2tp_session_hlist_lock);
			hlist_del_init_rcu(&session->global_hlist);
			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
			synchronize_rcu();
		}

		/* Management sessions (id 0) are not counted */
		if (session->session_id != 0)
			atomic_dec(&l2tp_session_count);

		/* Drop the hold taken on the tunnel socket at create time */
		sock_put(tunnel->sock);

		/* This will delete the tunnel context if this
		 * is the last session on the tunnel.
		 */
		session->tunnel = NULL;
		l2tp_tunnel_dec_refcount(tunnel);
	}

	kfree(session);

	return;
}
1496
1497/* This function is used by the netlink SESSION_DELETE command and by
1498 pseudowire modules.
1499 */
1500int l2tp_session_delete(struct l2tp_session *session)
1501{
1502 if (session->session_close != NULL)
1503 (*session->session_close)(session);
1504
1505 l2tp_session_dec_refcount(session);
1506
1507 return 0;
1508}
1509EXPORT_SYMBOL_GPL(l2tp_session_delete);
1510
1511
1512/* We come here whenever a session's send_seq, cookie_len or
1513 * l2specific_len parameters are set.
1514 */
1515void l2tp_session_set_header_len(struct l2tp_session *session, int version)
1516{
1517 if (version == L2TP_HDR_VER_2) {
1518 session->hdr_len = 6;
1519 if (session->send_seq)
1520 session->hdr_len += 4;
1521 } else {
1522 session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
1523 if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
1524 session->hdr_len += 4;
1525 }
1526
1527}
1528EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
1529
/* Allocate and initialise a session on the given tunnel.
 *
 * priv_size extra bytes are allocated after the session struct for the
 * pseudowire's private data. Configuration is copied from cfg when
 * supplied (debug flags otherwise inherit from the tunnel). The new
 * session takes a reference on itself and on the tunnel, holds the
 * tunnel socket, and is inserted into the tunnel's hash (and the
 * pernet hash for L2TPv3). Returns the session, or NULL on allocation
 * failure.
 */
struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
	struct l2tp_session *session;

	session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
	if (session != NULL) {
		session->magic = L2TP_SESSION_MAGIC;
		session->tunnel = tunnel;

		session->session_id = session_id;
		session->peer_session_id = peer_session_id;
		/* First expected receive sequence number */
		session->nr = 1;

		sprintf(&session->name[0], "sess %u/%u",
			tunnel->tunnel_id, session->session_id);

		skb_queue_head_init(&session->reorder_q);

		INIT_HLIST_NODE(&session->hlist);
		INIT_HLIST_NODE(&session->global_hlist);

		/* Inherit debug options from tunnel */
		session->debug = tunnel->debug;

		if (cfg) {
			session->pwtype = cfg->pw_type;
			session->debug = cfg->debug;
			session->mtu = cfg->mtu;
			session->mru = cfg->mru;
			session->send_seq = cfg->send_seq;
			session->recv_seq = cfg->recv_seq;
			session->lns_mode = cfg->lns_mode;
			session->reorder_timeout = cfg->reorder_timeout;
			session->offset = cfg->offset;
			session->l2specific_type = cfg->l2specific_type;
			session->l2specific_len = cfg->l2specific_len;
			session->cookie_len = cfg->cookie_len;
			memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
			session->peer_cookie_len = cfg->peer_cookie_len;
			memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
		}

		/* Select the header builder matching the tunnel version */
		if (tunnel->version == L2TP_HDR_VER_2)
			session->build_header = l2tp_build_l2tpv2_header;
		else
			session->build_header = l2tp_build_l2tpv3_header;

		l2tp_session_set_header_len(session, tunnel->version);

		/* Bump the reference count. The session context is deleted
		 * only when this drops to zero.
		 */
		l2tp_session_inc_refcount(session);
		l2tp_tunnel_inc_refcount(tunnel);

		/* Ensure tunnel socket isn't deleted */
		sock_hold(tunnel->sock);

		/* Add session to the tunnel's hash list */
		write_lock_bh(&tunnel->hlist_lock);
		hlist_add_head(&session->hlist,
			       l2tp_session_id_hash(tunnel, session_id));
		write_unlock_bh(&tunnel->hlist_lock);

		/* And to the global session list if L2TPv3 */
		if (tunnel->version != L2TP_HDR_VER_2) {
			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

			spin_lock_bh(&pn->l2tp_session_hlist_lock);
			hlist_add_head_rcu(&session->global_hlist,
					   l2tp_session_id_hash_2(pn, session_id));
			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
			synchronize_rcu();
		}

		/* Ignore management session in session count value */
		if (session->session_id != 0)
			atomic_inc(&l2tp_session_count);
	}

	return session;
}
1612EXPORT_SYMBOL_GPL(l2tp_session_create);
1613
1614/*****************************************************************************
1615 * Init and cleanup
1616 *****************************************************************************/
1617
1618static __net_init int l2tp_init_net(struct net *net)
1619{
1620 struct l2tp_net *pn;
1621 int err;
1622 int hash;
1623
1624 pn = kzalloc(sizeof(*pn), GFP_KERNEL);
1625 if (!pn)
1626 return -ENOMEM;
1627
1628 INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
1629 spin_lock_init(&pn->l2tp_tunnel_list_lock);
1630
1631 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
1632 INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
1633
1634 spin_lock_init(&pn->l2tp_session_hlist_lock);
1635
1636 err = net_assign_generic(net, l2tp_net_id, pn);
1637 if (err)
1638 goto out;
1639
1640 return 0;
1641
1642out:
1643 kfree(pn);
1644 return err;
1645}
1646
1647static __net_exit void l2tp_exit_net(struct net *net)
1648{
1649 struct l2tp_net *pn;
1650
1651 pn = net_generic(net, l2tp_net_id);
1652 /*
1653 * if someone has cached our net then
1654 * further net_generic call will return NULL
1655 */
1656 net_assign_generic(net, l2tp_net_id, NULL);
1657 kfree(pn);
1658}
1659
/* Per-network-namespace hooks for the L2TP core state. */
static struct pernet_operations l2tp_net_ops = {
	.init = l2tp_init_net,
	.exit = l2tp_exit_net,
	.id   = &l2tp_net_id,
	.size = sizeof(struct l2tp_net),
};
1666
1667static int __init l2tp_init(void)
1668{
1669 int rc = 0;
1670
1671 rc = register_pernet_device(&l2tp_net_ops);
1672 if (rc)
1673 goto out;
1674
1675 printk(KERN_INFO "L2TP core driver, %s\n", L2TP_DRV_VERSION);
1676
1677out:
1678 return rc;
1679}
1680
/* Module unload: remove the per-namespace hooks registered at init. */
static void __exit l2tp_exit(void)
{
	unregister_pernet_device(&l2tp_net_ops);
}
1685
1686module_init(l2tp_init);
1687module_exit(l2tp_exit);
1688
1689MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
1690MODULE_DESCRIPTION("L2TP core");
1691MODULE_LICENSE("GPL");
1692MODULE_VERSION(L2TP_DRV_VERSION);
1693
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
new file mode 100644
index 000000000000..f0f318edd3f1
--- /dev/null
+++ b/net/l2tp/l2tp_core.h
@@ -0,0 +1,304 @@
1/*
2 * L2TP internal definitions.
3 *
4 * Copyright (c) 2008,2009 Katalix Systems Ltd
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
#ifndef _L2TP_CORE_H_
#define _L2TP_CORE_H_

/* Magic values stored in tunnel/session structs so stale or
 * mis-cast pointers can be caught at runtime (see the BUG_ON in
 * l2tp_sock_to_tunnel()).  Just some random numbers.
 */
#define L2TP_TUNNEL_MAGIC 0x42114DDA
#define L2TP_SESSION_MAGIC 0x0C04EB7D

/* Per tunnel, session hash table size */
#define L2TP_HASH_BITS 4
#define L2TP_HASH_SIZE (1 << L2TP_HASH_BITS)

/* System-wide, session hash table size */
#define L2TP_HASH_BITS_2 8
#define L2TP_HASH_SIZE_2 (1 << L2TP_HASH_BITS_2)

/* Debug message categories for the DEBUG socket option */
enum {
	L2TP_MSG_DEBUG = (1 << 0),	/* verbose debug (if
					 * compiled in) */
	L2TP_MSG_CONTROL = (1 << 1),	/* userspace - kernel
					 * interface */
	L2TP_MSG_SEQ = (1 << 2),	/* sequence numbers */
	L2TP_MSG_DATA = (1 << 3),	/* data packets */
};
35
struct sk_buff;

/* Traffic counters embedded in both struct l2tp_tunnel and
 * struct l2tp_session.  64-bit so sustained traffic does not
 * wrap them.
 */
struct l2tp_stats {
	u64 tx_packets;
	u64 tx_bytes;
	u64 tx_errors;
	u64 rx_packets;
	u64 rx_bytes;
	u64 rx_seq_discards;	/* dropped for sequence-number reasons */
	u64 rx_oos_packets;	/* out-of-sequence packets */
	u64 rx_errors;
	u64 rx_cookie_discards;	/* dropped on cookie mismatch */
};
49
struct l2tp_tunnel;

/* Describes a session. Contains information to determine incoming
 * packets and transmit outgoing ones.  Filled in by the caller of
 * l2tp_session_create() and copied into struct l2tp_session.
 */
struct l2tp_session_cfg {
	enum l2tp_pwtype pw_type;
	unsigned data_seq:2;	/* data sequencing level
				 * 0 => none, 1 => IP only,
				 * 2 => all
				 */
	unsigned recv_seq:1;	/* expect receive packets with
				 * sequence numbers? */
	unsigned send_seq:1;	/* send packets with sequence
				 * numbers? */
	unsigned lns_mode:1;	/* behave as LNS? LAC enables
				 * sequence numbers under
				 * control of LNS. */
	int debug;		/* bitmask of debug message
				 * categories */
	u16 vlan_id;		/* VLAN pseudowire only */
	u16 offset;		/* offset to payload */
	u16 l2specific_len;	/* Layer 2 specific length */
	u16 l2specific_type;	/* Layer 2 specific type */
	u8 cookie[8];		/* optional cookie */
	int cookie_len;		/* 0, 4 or 8 bytes */
	u8 peer_cookie[8];	/* peer's cookie */
	int peer_cookie_len;	/* 0, 4 or 8 bytes */
	int reorder_timeout;	/* configured reorder timeout
				 * (in jiffies) */
	int mtu;
	int mru;
	char *ifname;
};
84
/* Per-session context.  Lives on a per-tunnel hash (hlist) and on
 * the system-wide hash (global_hlist); freed via l2tp_session_free()
 * when ref_count drops to zero.  Pseudowire-private data follows in
 * priv[] (see l2tp_session_priv()).
 */
struct l2tp_session {
	int magic;			/* should be
					 * L2TP_SESSION_MAGIC */

	struct l2tp_tunnel *tunnel;	/* back pointer to tunnel
					 * context */
	u32 session_id;
	u32 peer_session_id;
	u8 cookie[8];
	int cookie_len;
	u8 peer_cookie[8];
	int peer_cookie_len;
	u16 offset;			/* offset from end of L2TP header
					   to beginning of data */
	u16 l2specific_len;
	u16 l2specific_type;
	u16 hdr_len;
	u32 nr;				/* session NR state (receive) */
	u32 ns;				/* session NR state (send) */
	struct sk_buff_head reorder_q;	/* receive reorder queue */
	struct hlist_node hlist;	/* Hash list node */
	atomic_t ref_count;

	char name[32];			/* for logging */
	char ifname[IFNAMSIZ];
	unsigned data_seq:2;		/* data sequencing level
					 * 0 => none, 1 => IP only,
					 * 2 => all
					 */
	unsigned recv_seq:1;		/* expect receive packets with
					 * sequence numbers? */
	unsigned send_seq:1;		/* send packets with sequence
					 * numbers? */
	unsigned lns_mode:1;		/* behave as LNS? LAC enables
					 * sequence numbers under
					 * control of LNS. */
	int debug;			/* bitmask of debug message
					 * categories */
	int reorder_timeout;		/* configured reorder timeout
					 * (in jiffies) */
	int mtu;
	int mru;
	enum l2tp_pwtype pwtype;
	struct l2tp_stats stats;
	struct hlist_node global_hlist;	/* Global hash list node */

	/* Callbacks installed by the pseudowire driver. */
	int (*build_header)(struct l2tp_session *session, void *buf);
	void (*recv_skb)(struct l2tp_session *session, struct sk_buff *skb, int data_len);
	void (*session_close)(struct l2tp_session *session);
	void (*ref)(struct l2tp_session *session);
	void (*deref)(struct l2tp_session *session);
#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
	void (*show)(struct seq_file *m, void *priv);	/* debugfs dump hook */
#endif
	uint8_t priv[0];		/* private data */
};
141
/* Describes the tunnel. It contains info to track all the associated
 * sessions so incoming packets can be sorted out.  Passed by callers
 * of l2tp_tunnel_create().
 */
struct l2tp_tunnel_cfg {
	int debug;		/* bitmask of debug message
				 * categories */
	enum l2tp_encap_type encap;

	/* Used only for kernel-created sockets */
	struct in_addr local_ip;
	struct in_addr peer_ip;
	u16 local_udp_port;
	u16 peer_udp_port;
	unsigned int use_udp_checksums:1;
};
157
/* Per-tunnel context.  Hangs off the tunnel socket's sk_user_data
 * (see l2tp_sock_to_tunnel()) and owns the per-tunnel session hash.
 * Freed via l2tp_tunnel_free() when ref_count drops to zero.
 */
struct l2tp_tunnel {
	int magic;		/* Should be L2TP_TUNNEL_MAGIC */
	rwlock_t hlist_lock;	/* protect session_hlist */
	struct hlist_head session_hlist[L2TP_HASH_SIZE];
				/* hashed list of sessions,
				 * hashed by id */
	u32 tunnel_id;
	u32 peer_tunnel_id;
	int version;		/* 2=>L2TPv2, 3=>L2TPv3 */

	char name[20];		/* for logging */
	int debug;		/* bitmask of debug message
				 * categories */
	enum l2tp_encap_type encap;
	struct l2tp_stats stats;

	struct list_head list;	/* Keep a list of all tunnels */
	struct net *l2tp_net;	/* the net we belong to */

	atomic_t ref_count;
#ifdef CONFIG_DEBUG_FS
	void (*show)(struct seq_file *m, void *arg);	/* debugfs dump hook */
#endif
	int (*recv_payload_hook)(struct sk_buff *skb);
	void (*old_sk_destruct)(struct sock *);	/* chained from tunnel socket */
	struct sock *sock;	/* Parent socket */
	int fd;

	uint8_t priv[0];	/* private data */
};
188
/* Per-pseudowire-type netlink callbacks, registered through
 * l2tp_nl_register_ops().
 */
struct l2tp_nl_cmd_ops {
	int (*session_create)(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
	int (*session_delete)(struct l2tp_session *session);
};
193
194static inline void *l2tp_tunnel_priv(struct l2tp_tunnel *tunnel)
195{
196 return &tunnel->priv[0];
197}
198
199static inline void *l2tp_session_priv(struct l2tp_session *session)
200{
201 return &session->priv[0];
202}
203
204static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk)
205{
206 struct l2tp_tunnel *tunnel;
207
208 if (sk == NULL)
209 return NULL;
210
211 sock_hold(sk);
212 tunnel = (struct l2tp_tunnel *)(sk->sk_user_data);
213 if (tunnel == NULL) {
214 sock_put(sk);
215 goto out;
216 }
217
218 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
219
220out:
221 return tunnel;
222}
223
/* Lookup helpers (implemented in l2tp_core.c). */
extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);

/* Tunnel/session lifetime and receive path. */
extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp);
extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
extern int l2tp_session_delete(struct l2tp_session *session);
extern void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
extern void l2tp_session_free(struct l2tp_session *session);
extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
extern int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, int (*payload_hook)(struct sk_buff *skb));
extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);

/* Transmit path and misc helpers. */
extern int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len);
extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
extern void l2tp_tunnel_destruct(struct sock *sk);
extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
extern void l2tp_session_set_header_len(struct l2tp_session *session, int version);

/* Pseudowire driver registration with the netlink interface. */
extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops);
extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
248
/* Tunnel reference counts. Incremented per session that is added to
 * the tunnel.
 */
static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
{
	atomic_inc(&tunnel->ref_count);
}

/* Drop a reference; frees the tunnel when the count hits zero. */
static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
{
	if (atomic_dec_and_test(&tunnel->ref_count))
		l2tp_tunnel_free(tunnel);
}

/* With L2TP_REFCNT_DEBUG defined every get/put logs its call site;
 * otherwise the wrappers compile straight to the _1 helpers above.
 */
#ifdef L2TP_REFCNT_DEBUG
#define l2tp_tunnel_inc_refcount(_t) do { \
	printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
	l2tp_tunnel_inc_refcount_1(_t); \
	} while (0)
#define l2tp_tunnel_dec_refcount(_t) do { \
	printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
	l2tp_tunnel_dec_refcount_1(_t); \
	} while (0)
#else
#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
#endif
275
/* Session reference counts. Incremented when code obtains a reference
 * to a session.
 */
static inline void l2tp_session_inc_refcount_1(struct l2tp_session *session)
{
	atomic_inc(&session->ref_count);
}

/* Drop a reference; frees the session when the count hits zero. */
static inline void l2tp_session_dec_refcount_1(struct l2tp_session *session)
{
	if (atomic_dec_and_test(&session->ref_count))
		l2tp_session_free(session);
}

/* Debug wrappers, mirroring the tunnel refcount macros above. */
#ifdef L2TP_REFCNT_DEBUG
#define l2tp_session_inc_refcount(_s) do { \
	printk(KERN_DEBUG "l2tp_session_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \
	l2tp_session_inc_refcount_1(_s); \
	} while (0)
#define l2tp_session_dec_refcount(_s) do { \
	printk(KERN_DEBUG "l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \
	l2tp_session_dec_refcount_1(_s); \
	} while (0)
#else
#define l2tp_session_inc_refcount(s) l2tp_session_inc_refcount_1(s)
#define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s)
#endif
303
304#endif /* _L2TP_CORE_H_ */
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
new file mode 100644
index 000000000000..104ec3b283d4
--- /dev/null
+++ b/net/l2tp/l2tp_debugfs.c
@@ -0,0 +1,341 @@
1/*
2 * L2TP subsystem debugfs
3 *
4 * Copyright (c) 2010 Katalix Systems Ltd
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/skbuff.h>
14#include <linux/socket.h>
15#include <linux/hash.h>
16#include <linux/l2tp.h>
17#include <linux/in.h>
18#include <linux/etherdevice.h>
19#include <linux/spinlock.h>
20#include <linux/debugfs.h>
21#include <net/sock.h>
22#include <net/ip.h>
23#include <net/icmp.h>
24#include <net/udp.h>
25#include <net/inet_common.h>
26#include <net/inet_hashtables.h>
27#include <net/tcp_states.h>
28#include <net/protocol.h>
29#include <net/xfrm.h>
30#include <net/net_namespace.h>
31#include <net/netns/generic.h>
32
33#include "l2tp_core.h"
34
/* debugfs directory (l2tp/) and the "tunnels" file beneath it. */
static struct dentry *rootdir;
static struct dentry *tunnels;

/* Iterator state for the seq_file walk over all tunnels/sessions. */
struct l2tp_dfs_seq_data {
	struct net *net;
	int tunnel_idx;			/* current tunnel */
	int session_idx;		/* index of session within current tunnel */
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;	/* NULL means get next tunnel */
};
45
46static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
47{
48 pd->tunnel = l2tp_tunnel_find_nth(pd->net, pd->tunnel_idx);
49 pd->tunnel_idx++;
50}
51
52static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
53{
54 pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
55 pd->session_idx++;
56
57 if (pd->session == NULL) {
58 pd->session_idx = 0;
59 l2tp_dfs_next_tunnel(pd);
60 }
61
62}
63
/* seq_file start callback.  On the first call (*offs == 0) return
 * SEQ_START_TOKEN so ->show() emits the header; on later calls
 * advance the iterator (next session, or next tunnel when
 * pd->tunnel is NULL) and return the state, or NULL at end.
 */
static void *l2tp_dfs_seq_start(struct seq_file *m, loff_t *offs)
{
	struct l2tp_dfs_seq_data *pd = SEQ_START_TOKEN;
	loff_t pos = *offs;

	if (!pos)
		goto out;

	BUG_ON(m->private == NULL);
	pd = m->private;

	if (pd->tunnel == NULL)
		l2tp_dfs_next_tunnel(pd);
	else
		l2tp_dfs_next_session(pd);

	/* NULL tunnel and session indicates end of list */
	if ((pd->tunnel == NULL) && (pd->session == NULL))
		pd = NULL;

out:
	return pd;
}
87
88
89static void *l2tp_dfs_seq_next(struct seq_file *m, void *v, loff_t *pos)
90{
91 (*pos)++;
92 return NULL;
93}
94
/* seq_file stop callback; no locks or references to release. */
static void l2tp_dfs_seq_stop(struct seq_file *p, void *v)
{
	/* nothing to do */
}
99
100static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
101{
102 struct l2tp_tunnel *tunnel = v;
103 int session_count = 0;
104 int hash;
105 struct hlist_node *walk;
106 struct hlist_node *tmp;
107
108 read_lock_bh(&tunnel->hlist_lock);
109 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
110 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
111 struct l2tp_session *session;
112
113 session = hlist_entry(walk, struct l2tp_session, hlist);
114 if (session->session_id == 0)
115 continue;
116
117 session_count++;
118 }
119 }
120 read_unlock_bh(&tunnel->hlist_lock);
121
122 seq_printf(m, "\nTUNNEL %u peer %u", tunnel->tunnel_id, tunnel->peer_tunnel_id);
123 if (tunnel->sock) {
124 struct inet_sock *inet = inet_sk(tunnel->sock);
125 seq_printf(m, " from %pI4 to %pI4\n",
126 &inet->inet_saddr, &inet->inet_daddr);
127 if (tunnel->encap == L2TP_ENCAPTYPE_UDP)
128 seq_printf(m, " source port %hu, dest port %hu\n",
129 ntohs(inet->inet_sport), ntohs(inet->inet_dport));
130 }
131 seq_printf(m, " L2TPv%d, %s\n", tunnel->version,
132 tunnel->encap == L2TP_ENCAPTYPE_UDP ? "UDP" :
133 tunnel->encap == L2TP_ENCAPTYPE_IP ? "IP" :
134 "");
135 seq_printf(m, " %d sessions, refcnt %d/%d\n", session_count,
136 tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0,
137 atomic_read(&tunnel->ref_count));
138
139 seq_printf(m, " %08x rx %llu/%llu/%llu rx %llu/%llu/%llu\n",
140 tunnel->debug,
141 (unsigned long long)tunnel->stats.tx_packets,
142 (unsigned long long)tunnel->stats.tx_bytes,
143 (unsigned long long)tunnel->stats.tx_errors,
144 (unsigned long long)tunnel->stats.rx_packets,
145 (unsigned long long)tunnel->stats.rx_bytes,
146 (unsigned long long)tunnel->stats.rx_errors);
147
148 if (tunnel->show != NULL)
149 tunnel->show(m, tunnel);
150}
151
152static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
153{
154 struct l2tp_session *session = v;
155
156 seq_printf(m, " SESSION %u, peer %u, %s\n", session->session_id,
157 session->peer_session_id,
158 session->pwtype == L2TP_PWTYPE_ETH ? "ETH" :
159 session->pwtype == L2TP_PWTYPE_PPP ? "PPP" :
160 "");
161 if (session->send_seq || session->recv_seq)
162 seq_printf(m, " nr %hu, ns %hu\n", session->nr, session->ns);
163 seq_printf(m, " refcnt %d\n", atomic_read(&session->ref_count));
164 seq_printf(m, " config %d/%d/%c/%c/%s/%s %08x %u\n",
165 session->mtu, session->mru,
166 session->recv_seq ? 'R' : '-',
167 session->send_seq ? 'S' : '-',
168 session->data_seq == 1 ? "IPSEQ" :
169 session->data_seq == 2 ? "DATASEQ" : "-",
170 session->lns_mode ? "LNS" : "LAC",
171 session->debug,
172 jiffies_to_msecs(session->reorder_timeout));
173 seq_printf(m, " offset %hu l2specific %hu/%hu\n",
174 session->offset, session->l2specific_type, session->l2specific_len);
175 if (session->cookie_len) {
176 seq_printf(m, " cookie %02x%02x%02x%02x",
177 session->cookie[0], session->cookie[1],
178 session->cookie[2], session->cookie[3]);
179 if (session->cookie_len == 8)
180 seq_printf(m, "%02x%02x%02x%02x",
181 session->cookie[4], session->cookie[5],
182 session->cookie[6], session->cookie[7]);
183 seq_printf(m, "\n");
184 }
185 if (session->peer_cookie_len) {
186 seq_printf(m, " peer cookie %02x%02x%02x%02x",
187 session->peer_cookie[0], session->peer_cookie[1],
188 session->peer_cookie[2], session->peer_cookie[3]);
189 if (session->peer_cookie_len == 8)
190 seq_printf(m, "%02x%02x%02x%02x",
191 session->peer_cookie[4], session->peer_cookie[5],
192 session->peer_cookie[6], session->peer_cookie[7]);
193 seq_printf(m, "\n");
194 }
195
196 seq_printf(m, " %hu/%hu tx %llu/%llu/%llu rx %llu/%llu/%llu\n",
197 session->nr, session->ns,
198 (unsigned long long)session->stats.tx_packets,
199 (unsigned long long)session->stats.tx_bytes,
200 (unsigned long long)session->stats.tx_errors,
201 (unsigned long long)session->stats.rx_packets,
202 (unsigned long long)session->stats.rx_bytes,
203 (unsigned long long)session->stats.rx_errors);
204
205 if (session->show != NULL)
206 session->show(m, session);
207}
208
/* seq_file show callback: print the legend on the first call, then
 * either the current tunnel or the current session.
 */
static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
{
	struct l2tp_dfs_seq_data *pd = v;

	/* display header on line 1 */
	if (v == SEQ_START_TOKEN) {
		seq_puts(m, "TUNNEL ID, peer ID from IP to IP\n");
		seq_puts(m, " L2TPv2/L2TPv3, UDP/IP\n");
		seq_puts(m, " sessions session-count, refcnt refcnt/sk->refcnt\n");
		seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
		seq_puts(m, "  SESSION ID, peer ID, PWTYPE\n");
		seq_puts(m, "   refcnt cnt\n");
		seq_puts(m, "   offset OFFSET l2specific TYPE/LEN\n");
		seq_puts(m, "   [ cookie ]\n");
		seq_puts(m, "   [ peer cookie ]\n");
		seq_puts(m, "   config mtu/mru/rcvseq/sendseq/dataseq/lns debug reorderto\n");
		seq_puts(m, "   nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
		goto out;
	}

	/* Show the tunnel or session context */
	if (pd->session == NULL)
		l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
	else
		l2tp_dfs_seq_session_show(m, pd->session);

out:
	return 0;
}
238
/* seq_file iterator for the debugfs "tunnels" file. */
static const struct seq_operations l2tp_dfs_seq_ops = {
	.start = l2tp_dfs_seq_start,
	.next = l2tp_dfs_seq_next,
	.stop = l2tp_dfs_seq_stop,
	.show = l2tp_dfs_seq_show,
};
245
246static int l2tp_dfs_seq_open(struct inode *inode, struct file *file)
247{
248 struct l2tp_dfs_seq_data *pd;
249 struct seq_file *seq;
250 int rc = -ENOMEM;
251
252 pd = kzalloc(GFP_KERNEL, sizeof(*pd));
253 if (pd == NULL)
254 goto out;
255
256 /* Derive the network namespace from the pid opening the
257 * file.
258 */
259 pd->net = get_net_ns_by_pid(current->pid);
260 if (IS_ERR(pd->net)) {
261 rc = -PTR_ERR(pd->net);
262 goto err_free_pd;
263 }
264
265 rc = seq_open(file, &l2tp_dfs_seq_ops);
266 if (rc)
267 goto err_free_net;
268
269 seq = file->private_data;
270 seq->private = pd;
271
272out:
273 return rc;
274
275err_free_net:
276 put_net(pd->net);
277err_free_pd:
278 kfree(pd);
279 goto out;
280}
281
282static int l2tp_dfs_seq_release(struct inode *inode, struct file *file)
283{
284 struct l2tp_dfs_seq_data *pd;
285 struct seq_file *seq;
286
287 seq = file->private_data;
288 pd = seq->private;
289 if (pd->net)
290 put_net(pd->net);
291 kfree(pd);
292 seq_release(inode, file);
293
294 return 0;
295}
296
/* File operations for the debugfs "tunnels" file. */
static const struct file_operations l2tp_dfs_fops = {
	.owner		= THIS_MODULE,
	.open		= l2tp_dfs_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= l2tp_dfs_seq_release,
};
304
/* Create the debugfs directory "l2tp" and its "tunnels" file. */
static int __init l2tp_debugfs_init(void)
{
	int rc = 0;

	rootdir = debugfs_create_dir("l2tp", NULL);
	if (IS_ERR(rootdir)) {
		rc = PTR_ERR(rootdir);
		rootdir = NULL;
		goto out;
	}

	/* NOTE(review): only NULL is treated as failure here while the
	 * dir above is checked with IS_ERR -- confirm both returns are
	 * handled as intended for this kernel version.
	 */
	tunnels = debugfs_create_file("tunnels", 0600, rootdir, NULL, &l2tp_dfs_fops);
	if (tunnels == NULL)
		rc = -EIO;

	printk(KERN_INFO "L2TP debugfs support\n");

out:
	if (rc)
		printk(KERN_WARNING "l2tp debugfs: unable to init\n");

	return rc;
}
328
/* Remove the file before its parent directory. */
static void __exit l2tp_debugfs_exit(void)
{
	debugfs_remove(tunnels);
	debugfs_remove(rootdir);
}

module_init(l2tp_debugfs_init);
module_exit(l2tp_debugfs_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP debugfs driver");
MODULE_VERSION("1.0");
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
new file mode 100644
index 000000000000..ca1164afeb74
--- /dev/null
+++ b/net/l2tp/l2tp_eth.c
@@ -0,0 +1,361 @@
1/*
2 * L2TPv3 ethernet pseudowire driver
3 *
4 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/skbuff.h>
14#include <linux/socket.h>
15#include <linux/hash.h>
16#include <linux/l2tp.h>
17#include <linux/in.h>
18#include <linux/etherdevice.h>
19#include <linux/spinlock.h>
20#include <net/sock.h>
21#include <net/ip.h>
22#include <net/icmp.h>
23#include <net/udp.h>
24#include <net/inet_common.h>
25#include <net/inet_hashtables.h>
26#include <net/tcp_states.h>
27#include <net/protocol.h>
28#include <net/xfrm.h>
29#include <net/net_namespace.h>
30#include <net/netns/generic.h>
31
32#include "l2tp_core.h"
33
/* Default device name. May be overridden by name specified by user */
#define L2TP_ETH_DEV_NAME	"l2tpeth%d"

/* Per-device state, reached via netdev_priv().  Each l2tpeth device
 * is also linked into the per-net device list below.
 */
struct l2tp_eth {
	struct net_device	*dev;
	struct sock		*tunnel_sock;
	struct l2tp_session	*session;
	struct list_head	list;
};

/* Session-private data, reached via l2tp_session_priv(). */
struct l2tp_eth_sess {
	struct net_device	*dev;
};

/* per-net private data for this module */
static unsigned int l2tp_eth_net_id;
struct l2tp_eth_net {
	struct list_head l2tp_eth_dev_list;
	spinlock_t l2tp_eth_lock;	/* protects l2tp_eth_dev_list */
};
56
/* Fetch this module's per-namespace state for @net. */
static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
{
	return net_generic(net, l2tp_eth_net_id);
}
61
62static int l2tp_eth_dev_init(struct net_device *dev)
63{
64 struct l2tp_eth *priv = netdev_priv(dev);
65
66 priv->dev = dev;
67 random_ether_addr(dev->dev_addr);
68 memset(&dev->broadcast[0], 0xff, 6);
69
70 return 0;
71}
72
/* ndo_uninit: unlink the device from the per-net list and drop the
 * reference taken in l2tp_eth_create().
 */
static void l2tp_eth_dev_uninit(struct net_device *dev)
{
	struct l2tp_eth *priv = netdev_priv(dev);
	struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev));

	spin_lock(&pn->l2tp_eth_lock);
	list_del_init(&priv->list);
	spin_unlock(&pn->l2tp_eth_lock);
	dev_put(dev);
}
83
84static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
85{
86 struct l2tp_eth *priv = netdev_priv(dev);
87 struct l2tp_session *session = priv->session;
88
89 l2tp_xmit_skb(session, skb, session->hdr_len);
90
91 dev->stats.tx_bytes += skb->len;
92 dev->stats.tx_packets++;
93
94 return 0;
95}
96
/* Netdev callbacks for l2tpeth devices.
 * NOTE(review): never written after init -- could be const.
 */
static struct net_device_ops l2tp_eth_netdev_ops = {
	.ndo_init		= l2tp_eth_dev_init,
	.ndo_uninit		= l2tp_eth_dev_uninit,
	.ndo_start_xmit		= l2tp_eth_dev_xmit,
};
102
/* alloc_netdev() setup callback: standard Ethernet defaults, our
 * ops, and automatic free on unregister.
 */
static void l2tp_eth_dev_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops		= &l2tp_eth_netdev_ops;
	dev->destructor		= free_netdev;
}
110
/* Receive hook installed on the session: optionally hex-dump the
 * frame, then hand it to the stack via the virtual device.
 */
static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
{
	struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
	struct net_device *dev = spriv->dev;

	if (session->debug & L2TP_MSG_DATA) {
		unsigned int length;
		int offset;
		u8 *ptr = skb->data;

		/* Dump at most the first 32 bytes */
		length = min(32u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto error;

		printk(KERN_DEBUG "%s: eth recv: ", session->name);

		offset = 0;
		do {
			printk(" %02X", ptr[offset]);
		} while (++offset < length);

		printk("\n");
	}

	/* Too short to carry an Ethernet header */
	if (data_len < ETH_HLEN)
		goto error;

	secpath_reset(skb);

	/* checksums verified by L2TP */
	skb->ip_summed = CHECKSUM_NONE;

	/* Drop any state from the tunnel leg before reinjection */
	skb_dst_drop(skb);
	nf_reset(skb);

	if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
		dev->last_rx = jiffies;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += data_len;
	} else
		dev->stats.rx_errors++;

	return;

error:
	/* dev_forward_skb frees the skb itself on failure, so only
	 * the paths that jump here still own it.
	 */
	dev->stats.rx_errors++;
	kfree_skb(skb);
}
159
160static void l2tp_eth_delete(struct l2tp_session *session)
161{
162 struct l2tp_eth_sess *spriv;
163 struct net_device *dev;
164
165 if (session) {
166 spriv = l2tp_session_priv(session);
167 dev = spriv->dev;
168 if (dev) {
169 unregister_netdev(dev);
170 spriv->dev = NULL;
171 }
172 }
173}
174
#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
/* debugfs session dump hook: print the interface name. */
static void l2tp_eth_show(struct seq_file *m, void *arg)
{
	struct l2tp_session *session = arg;
	struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
	struct net_device *dev = spriv->dev;

	seq_printf(m, "   interface %s\n", dev->name);
}
#endif
185
/* Netlink session_create handler: create an L2TP session and a
 * virtual Ethernet device bound to it.  Returns 0 or a negative
 * errno; on failure all intermediate state is unwound.
 */
static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;
	struct l2tp_eth *priv;
	struct l2tp_eth_sess *spriv;
	int rc;
	struct l2tp_eth_net *pn;

	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (!tunnel) {
		rc = -ENODEV;
		goto out;
	}

	/* Refuse duplicate session ids */
	session = l2tp_session_find(net, tunnel, session_id);
	if (session) {
		rc = -EEXIST;
		goto out;
	}

	if (cfg->ifname) {
		/* Refuse a name already taken by another device */
		dev = dev_get_by_name(net, cfg->ifname);
		if (dev) {
			dev_put(dev);
			rc = -EEXIST;
			goto out;
		}
		strlcpy(name, cfg->ifname, IFNAMSIZ);
	} else
		strcpy(name, L2TP_ETH_DEV_NAME);

	session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
				      peer_session_id, cfg);
	if (!session) {
		rc = -ENOMEM;
		goto out;
	}

	dev = alloc_netdev(sizeof(*priv), name, l2tp_eth_dev_setup);
	if (!dev) {
		rc = -ENOMEM;
		goto out_del_session;
	}

	dev_net_set(dev, net);
	/* Default the MTU to the underlying device's, less our header */
	if (session->mtu == 0)
		session->mtu = dev->mtu - session->hdr_len;
	dev->mtu = session->mtu;
	dev->needed_headroom += session->hdr_len;

	priv = netdev_priv(dev);
	priv->dev = dev;
	priv->session = session;
	INIT_LIST_HEAD(&priv->list);

	priv->tunnel_sock = tunnel->sock;
	session->recv_skb = l2tp_eth_dev_recv;
	session->session_close = l2tp_eth_delete;
#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
	session->show = l2tp_eth_show;
#endif

	spriv = l2tp_session_priv(session);
	spriv->dev = dev;

	rc = register_netdev(dev);
	if (rc < 0)
		goto out_del_dev;

	/* Must be done after register_netdev() */
	strlcpy(session->ifname, dev->name, IFNAMSIZ);

	/* Reference dropped in l2tp_eth_dev_uninit() */
	dev_hold(dev);
	pn = l2tp_eth_pernet(dev_net(dev));
	spin_lock(&pn->l2tp_eth_lock);
	list_add(&priv->list, &pn->l2tp_eth_dev_list);
	spin_unlock(&pn->l2tp_eth_lock);

	return 0;

out_del_dev:
	free_netdev(dev);
out_del_session:
	l2tp_session_delete(session);
out:
	return rc;
}
276
277static __net_init int l2tp_eth_init_net(struct net *net)
278{
279 struct l2tp_eth_net *pn;
280 int err;
281
282 pn = kzalloc(sizeof(*pn), GFP_KERNEL);
283 if (!pn)
284 return -ENOMEM;
285
286 INIT_LIST_HEAD(&pn->l2tp_eth_dev_list);
287 spin_lock_init(&pn->l2tp_eth_lock);
288
289 err = net_assign_generic(net, l2tp_eth_net_id, pn);
290 if (err)
291 goto out;
292
293 return 0;
294
295out:
296 kfree(pn);
297 return err;
298}
299
300static __net_exit void l2tp_eth_exit_net(struct net *net)
301{
302 struct l2tp_eth_net *pn;
303
304 pn = net_generic(net, l2tp_eth_net_id);
305 /*
306 * if someone has cached our net then
307 * further net_generic call will return NULL
308 */
309 net_assign_generic(net, l2tp_eth_net_id, NULL);
310 kfree(pn);
311}
312
313static __net_initdata struct pernet_operations l2tp_eth_net_ops = {
314 .init = l2tp_eth_init_net,
315 .exit = l2tp_eth_exit_net,
316 .id = &l2tp_eth_net_id,
317 .size = sizeof(struct l2tp_eth_net),
318};
319
320
/* Netlink handlers for the Ethernet pseudowire type. */
static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = {
	.session_create	= l2tp_eth_create,
	.session_delete	= l2tp_session_delete,
};
325
326
327static int __init l2tp_eth_init(void)
328{
329 int err = 0;
330
331 err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops);
332 if (err)
333 goto out;
334
335 err = register_pernet_device(&l2tp_eth_net_ops);
336 if (err)
337 goto out_unreg;
338
339 printk(KERN_INFO "L2TP ethernet pseudowire support (L2TPv3)\n");
340
341 return 0;
342
343out_unreg:
344 l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
345out:
346 return err;
347}
348
/* Module unload: reverse of l2tp_eth_init(). */
static void __exit l2tp_eth_exit(void)
{
	unregister_pernet_device(&l2tp_eth_net_ops);
	l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
}

module_init(l2tp_eth_init);
module_exit(l2tp_eth_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP ethernet pseudowire driver");
MODULE_VERSION("1.0");
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
new file mode 100644
index 000000000000..0852512d392c
--- /dev/null
+++ b/net/l2tp/l2tp_ip.c
@@ -0,0 +1,679 @@
1/*
2 * L2TPv3 IP encapsulation support
3 *
4 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/icmp.h>
13#include <linux/module.h>
14#include <linux/skbuff.h>
15#include <linux/random.h>
16#include <linux/socket.h>
17#include <linux/l2tp.h>
18#include <linux/in.h>
19#include <net/sock.h>
20#include <net/ip.h>
21#include <net/icmp.h>
22#include <net/udp.h>
23#include <net/inet_common.h>
24#include <net/inet_hashtables.h>
25#include <net/tcp_states.h>
26#include <net/protocol.h>
27#include <net/xfrm.h>
28
29#include "l2tp_core.h"
30
/* Private state of an IPPROTO_L2TP socket.  conn_id/peer_conn_id are
 * the local/remote L2TP control connection (tunnel) ids set by bind()
 * and connect(); the counters are per-socket traffic statistics.
 */
31struct l2tp_ip_sock {
32 /* inet_sock has to be the first member of l2tp_ip_sock */
33 struct inet_sock inet;
34
35 __u32 conn_id;
36 __u32 peer_conn_id;
37
38 __u64 tx_packets;
39 __u64 tx_bytes;
40 __u64 tx_errors;
41 __u64 rx_packets;
42 __u64 rx_bytes;
43 __u64 rx_errors;
44};
45
/* Global socket tables (not per-netns), both guarded by l2tp_ip_lock:
 * l2tp_ip_table holds all open L2TP/IP sockets, l2tp_ip_bind_table
 * those that are bound or connected.
 */
46static DEFINE_RWLOCK(l2tp_ip_lock);
47static struct hlist_head l2tp_ip_table;
48static struct hlist_head l2tp_ip_bind_table;
49
/* Cast a struct sock to its L2TP/IP private area; valid because
 * inet_sock (and thus sock) is the first member of l2tp_ip_sock.
 */
50static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
51{
52 return (struct l2tp_ip_sock *)sk;
53}
54
/* Find a bound L2TP/IP socket matching (laddr, dif, tunnel_id).
 * A wildcard local address or unbound device on the socket matches
 * anything.  Caller must hold l2tp_ip_lock; no reference is taken on
 * the returned socket, so it is only valid while the lock is held.
 */
55static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
56{
57 struct hlist_node *node;
58 struct sock *sk;
59
60 sk_for_each_bound(sk, node, &l2tp_ip_bind_table) {
61 struct inet_sock *inet = inet_sk(sk);
62 struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
63
64 if (l2tp == NULL)
65 continue;
66
67 if ((l2tp->conn_id == tunnel_id) &&
/* netns comparison is compiled out when CONFIG_NET_NS is off */
68#ifdef CONFIG_NET_NS
69 (sk->sk_net == net) &&
70#endif
71 !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
72 !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
73 goto found;
74 }
75
76 sk = NULL;
77found:
78 return sk;
79}
80
/* Reference-taking variant of __l2tp_ip_bind_lookup().
 * NOTE(review): this does not take l2tp_ip_lock itself, so the
 * sock_hold() races with socket removal unless the caller already
 * holds the lock — verify callers (none are visible in this file).
 */
81static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
82{
83 struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id);
84 if (sk)
85 sock_hold(sk);
86
87 return sk;
88}
89
90/* When processing receive frames, there are two cases to
91 * consider. Data frames consist of a non-zero session-id and an
92 * optional cookie. Control frames consist of a regular L2TP header
93 * preceded by 32-bits of zeros.
94 *
95 * L2TPv3 Session Header Over IP
96 *
97 * 0 1 2 3
98 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
99 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
100 * | Session ID |
101 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
102 * | Cookie (optional, maximum 64 bits)...
103 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
104 * |
105 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
106 *
107 * L2TPv3 Control Message Header Over IP
108 *
109 * 0 1 2 3
110 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
111 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
112 * | (32 bits of zeros) |
113 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
114 * |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length |
115 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
116 * | Control Connection ID |
117 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
118 * | Ns | Nr |
119 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
120 *
121 * All control frames are passed to userspace.
122 */
123static int l2tp_ip_recv(struct sk_buff *skb)
124{
125 struct sock *sk;
126 u32 session_id;
127 u32 tunnel_id;
128 unsigned char *ptr, *optr;
129 struct l2tp_session *session;
130 struct l2tp_tunnel *tunnel = NULL;
131 int length;
132 int offset;
133
134 /* Point to L2TP header */
135 optr = ptr = skb->data;
136
137 if (!pskb_may_pull(skb, 4))
138 goto discard;
139
140 session_id = ntohl(*((__be32 *) ptr));
141 ptr += 4;
142
143 /* RFC3931: L2TP/IP packets have the first 4 bytes containing
144 * the session_id. If it is 0, the packet is a L2TP control
145 * frame and the session_id value can be discarded.
146 */
147 if (session_id == 0) {
148 __skb_pull(skb, 4);
149 goto pass_up;
150 }
151
152 /* Ok, this is a data packet. Lookup the session. */
153 session = l2tp_session_find(&init_net, NULL, session_id);
154 if (session == NULL)
155 goto discard;
156
157 tunnel = session->tunnel;
158 if (tunnel == NULL)
159 goto discard;
160
161 /* Trace packet contents, if enabled */
162 if (tunnel->debug & L2TP_MSG_DATA) {
163 length = min(32u, skb->len);
164 if (!pskb_may_pull(skb, length))
165 goto discard;
166
167 printk(KERN_DEBUG "%s: ip recv: ", tunnel->name);
168
169 offset = 0;
170 do {
171 printk(" %02X", ptr[offset]);
172 } while (++offset < length);
173
174 printk("\n");
175 }
176
177 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
178
179 return 0;
180
181pass_up:
182 /* Get the tunnel_id from the L2TP header */
183 if (!pskb_may_pull(skb, 12))
184 goto discard;
185
186 if ((skb->data[0] & 0xc0) != 0xc0)
187 goto discard;
188
189 tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
190 tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
191 if (tunnel != NULL)
192 sk = tunnel->sock;
193 else {
194 struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
195
196 read_lock_bh(&l2tp_ip_lock);
197 sk = __l2tp_ip_bind_lookup(&init_net, iph->daddr, 0, tunnel_id);
198 read_unlock_bh(&l2tp_ip_lock);
199 }
200
201 if (sk == NULL)
202 goto discard;
203
204 sock_hold(sk);
205
206 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
207 goto discard_put;
208
209 nf_reset(skb);
210
211 return sk_receive_skb(sk, skb, 1);
212
213discard_put:
214 sock_put(sk);
215
216discard:
217 kfree_skb(skb);
218 return 0;
219}
220
/* proto .init hook: mark the socket with a non-zero local "port"
 * (the protocol number) to suppress inet autobind, and add it to the
 * global socket table.  Always returns 0.
 */
221static int l2tp_ip_open(struct sock *sk)
222{
223 /* Prevent autobind. We don't have ports. */
224 inet_sk(sk)->inet_num = IPPROTO_L2TP;
225
226 write_lock_bh(&l2tp_ip_lock);
227 sk_add_node(sk, &l2tp_ip_table);
228 write_unlock_bh(&l2tp_ip_lock);
229
230 return 0;
231}
232
/* proto .close hook: unhash from both global tables under the write
 * lock, then release the socket through the common teardown path.
 * The timeout argument is unused (datagram semantics, nothing to drain).
 */
233static void l2tp_ip_close(struct sock *sk, long timeout)
234{
235 write_lock_bh(&l2tp_ip_lock);
236 hlist_del_init(&sk->sk_bind_node);
237 hlist_del_init(&sk->sk_node);
238 write_unlock_bh(&l2tp_ip_lock);
239 sk_common_release(sk);
240}
241
/* proto .destroy hook: drop any queued-but-unsent skbs and balance
 * the refcount debugging counter incremented at socket creation.
 */
242static void l2tp_ip_destroy_sock(struct sock *sk)
243{
244 struct sk_buff *skb;
245
246 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
247 kfree_skb(skb);
248
249 sk_refcnt_debug_dec(sk);
250}
251
252static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
253{
254 struct inet_sock *inet = inet_sk(sk);
255 struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
256 int ret = -EINVAL;
257 int chk_addr_ret;
258
259 ret = -EADDRINUSE;
260 read_lock_bh(&l2tp_ip_lock);
261 if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
262 goto out_in_use;
263
264 read_unlock_bh(&l2tp_ip_lock);
265
266 lock_sock(sk);
267 if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
268 goto out;
269
270 chk_addr_ret = inet_addr_type(&init_net, addr->l2tp_addr.s_addr);
271 ret = -EADDRNOTAVAIL;
272 if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
273 chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
274 goto out;
275
276 inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
277 if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
278 inet->inet_saddr = 0; /* Use device */
279 sk_dst_reset(sk);
280
281 l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
282
283 write_lock_bh(&l2tp_ip_lock);
284 sk_add_bind_node(sk, &l2tp_ip_bind_table);
285 sk_del_node_init(sk);
286 write_unlock_bh(&l2tp_ip_lock);
287 ret = 0;
288out:
289 release_sock(sk);
290
291 return ret;
292
293out_in_use:
294 read_unlock_bh(&l2tp_ip_lock);
295
296 return ret;
297}
298
/* connect() handler: resolve a route to the peer, record the peer's
 * L2TP connection id and addresses, mark the socket established and
 * rehash it into the bind table.
 * NOTE(review): the socket is not locked here, unlike bind() — verify
 * against concurrent bind/connect callers.
 */
299static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
300{
301 int rc;
302 struct inet_sock *inet = inet_sk(sk);
303 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
304 struct rtable *rt;
305 __be32 saddr;
306 int oif;
307
308 rc = -EINVAL;
309 if (addr_len < sizeof(*lsa))
310 goto out;
311
312 rc = -EAFNOSUPPORT;
313 if (lsa->l2tp_family != AF_INET)
314 goto out;
315
316 sk_dst_reset(sk);
317
318 oif = sk->sk_bound_dev_if;
319 saddr = inet->inet_saddr;
320
321 rc = -EINVAL;
322 if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
323 goto out;
324
/* Route to the peer; 0 sport/dport since L2TP/IP has no ports. */
325 rc = ip_route_connect(&rt, lsa->l2tp_addr.s_addr, saddr,
326 RT_CONN_FLAGS(sk), oif,
327 IPPROTO_L2TP,
328 0, 0, sk, 1);
329 if (rc) {
330 if (rc == -ENETUNREACH)
331 IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
332 goto out;
333 }
334
/* Refuse multicast/broadcast peers; drop the route we just took. */
335 rc = -ENETUNREACH;
336 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
337 ip_rt_put(rt);
338 goto out;
339 }
340
341 l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
342
/* Adopt the route's source address if none was bound. */
343 if (!inet->inet_saddr)
344 inet->inet_saddr = rt->rt_src;
345 if (!inet->inet_rcv_saddr)
346 inet->inet_rcv_saddr = rt->rt_src;
347 inet->inet_daddr = rt->rt_dst;
348 sk->sk_state = TCP_ESTABLISHED;
349 inet->inet_id = jiffies;
350
351 sk_dst_set(sk, &rt->u.dst);
352
/* Re-insert so lookups now match the (possibly updated) binding. */
353 write_lock_bh(&l2tp_ip_lock);
354 hlist_del_init(&sk->sk_bind_node);
355 sk_add_bind_node(sk, &l2tp_ip_bind_table);
356 write_unlock_bh(&l2tp_ip_lock);
357
358 rc = 0;
359out:
360 return rc;
361}
362
363static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
364 int *uaddr_len, int peer)
365{
366 struct sock *sk = sock->sk;
367 struct inet_sock *inet = inet_sk(sk);
368 struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
369 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
370
371 memset(lsa, 0, sizeof(*lsa));
372 lsa->l2tp_family = AF_INET;
373 if (peer) {
374 if (!inet->inet_dport)
375 return -ENOTCONN;
376 lsa->l2tp_conn_id = lsk->peer_conn_id;
377 lsa->l2tp_addr.s_addr = inet->inet_daddr;
378 } else {
379 __be32 addr = inet->inet_rcv_saddr;
380 if (!addr)
381 addr = inet->inet_saddr;
382 lsa->l2tp_conn_id = lsk->conn_id;
383 lsa->l2tp_addr.s_addr = addr;
384 }
385 *uaddr_len = sizeof(*lsa);
386 return 0;
387}
388
/* Backlog receive: queue an skb to a socket that was locked when the
 * packet arrived.  Returns 0 on success; on policy failure or a full
 * receive queue the skb is dropped, counted, and -1 returned.
 */
389static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
390{
391 int rc;
392
393 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
394 goto drop;
395
396 nf_reset(skb);
397
398 /* Charge it to the socket, dropping if the queue is full. */
399 rc = sock_queue_rcv_skb(sk, skb);
400 if (rc < 0)
401 goto drop;
402
403 return 0;
404
405drop:
406 IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
407 kfree_skb(skb);
408 return -1;
409}
410
411/* Userspace will call sendmsg() on the tunnel socket to send L2TP
412 * control frames.
413 */
/* Builds a packet of [0 session id][user data], routes it (reusing the
 * cached route when connected) and hands it to ip_queue_xmit().
 * Returns the byte count sent, or a negative errno; per-socket tx
 * counters are updated either way.
 */
414static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len)
415{
416 struct sk_buff *skb;
417 int rc;
418 struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk);
419 struct inet_sock *inet = inet_sk(sk);
420 struct ip_options *opt = inet->opt;
421 struct rtable *rt = NULL;
422 int connected = 0;
423 __be32 daddr;
424
425 if (sock_flag(sk, SOCK_DEAD))
426 return -ENOTCONN;
427
428 /* Get and verify the address. */
429 if (msg->msg_name) {
430 struct sockaddr_l2tpip *lip = (struct sockaddr_l2tpip *) msg->msg_name;
431 if (msg->msg_namelen < sizeof(*lip))
432 return -EINVAL;
433
434 if (lip->l2tp_family != AF_INET) {
435 if (lip->l2tp_family != AF_UNSPEC)
436 return -EAFNOSUPPORT;
437 }
438
439 daddr = lip->l2tp_addr.s_addr;
440 } else {
441 if (sk->sk_state != TCP_ESTABLISHED)
442 return -EDESTADDRREQ;
443
444 daddr = inet->inet_daddr;
445 connected = 1;
446 }
447
448 /* Allocate a socket buffer */
449 rc = -ENOMEM;
/* Room for IP header (aligned), 4-byte zero session id and payload. */
450 skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
451 4 + len, 0, GFP_KERNEL);
452 if (!skb)
453 goto error;
454
455 /* Reserve space for headers, putting IP header on 4-byte boundary. */
456 skb_reserve(skb, 2 + NET_SKB_PAD);
457 skb_reset_network_header(skb);
458 skb_reserve(skb, sizeof(struct iphdr));
459 skb_reset_transport_header(skb);
460
461 /* Insert 0 session_id */
462 *((__be32 *) skb_put(skb, 4)) = 0;
463
464 /* Copy user data into skb */
465 rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
466 if (rc < 0) {
467 kfree_skb(skb);
468 goto error;
469 }
470
/* Reuse the cached route only for connected sends. */
471 if (connected)
472 rt = (struct rtable *) __sk_dst_check(sk, 0);
473
474 if (rt == NULL) {
475 /* Use correct destination address if we have options. */
476 if (opt && opt->srr)
477 daddr = opt->faddr;
478
479 {
480 struct flowi fl = { .oif = sk->sk_bound_dev_if,
481 .nl_u = { .ip4_u = {
482 .daddr = daddr,
483 .saddr = inet->inet_saddr,
484 .tos = RT_CONN_FLAGS(sk) } },
485 .proto = sk->sk_protocol,
486 .flags = inet_sk_flowi_flags(sk),
487 .uli_u = { .ports = {
488 .sport = inet->inet_sport,
489 .dport = inet->inet_dport } } };
490
491 /* If this fails, retransmit mechanism of transport layer will
492 * keep trying until route appears or the connection times
493 * itself out.
494 */
495 security_sk_classify_flow(sk, &fl);
496 if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
497 goto no_route;
498 }
/* Cache the new route on the socket for subsequent sends. */
499 sk_setup_caps(sk, &rt->u.dst);
500 }
501 skb_dst_set(skb, dst_clone(&rt->u.dst));
502
503 /* Queue the packet to IP for output */
504 rc = ip_queue_xmit(skb);
505
506error:
507 /* Update stats */
508 if (rc >= 0) {
509 lsa->tx_packets++;
510 lsa->tx_bytes += len;
511 rc = len;
512 } else {
513 lsa->tx_errors++;
514 }
515
516 return rc;
517
518no_route:
519 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
520 kfree_skb(skb);
521 return -EHOSTUNREACH;
522}
523
/* recvmsg() handler: standard datagram receive with MSG_TRUNC
 * semantics; fills msg_name with the sender's IPv4 address as a
 * sockaddr_in and updates per-socket rx counters.
 * NOTE(review): *addr_len is set before any data arrives, the pattern
 * later reworked kernel-wide for msg_namelen handling — verify.
 */
524static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
525 size_t len, int noblock, int flags, int *addr_len)
526{
527 struct inet_sock *inet = inet_sk(sk);
528 struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
529 size_t copied = 0;
530 int err = -EOPNOTSUPP;
531 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
532 struct sk_buff *skb;
533
534 if (flags & MSG_OOB)
535 goto out;
536
537 if (addr_len)
538 *addr_len = sizeof(*sin);
539
540 skb = skb_recv_datagram(sk, flags, noblock, &err);
541 if (!skb)
542 goto out;
543
544 copied = skb->len;
545 if (len < copied) {
546 msg->msg_flags |= MSG_TRUNC;
547 copied = len;
548 }
549
550 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
551 if (err)
552 goto done;
553
554 sock_recv_timestamp(msg, sk, skb);
555
556 /* Copy the address. */
557 if (sin) {
558 sin->sin_family = AF_INET;
559 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
560 sin->sin_port = 0;
561 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
562 }
563 if (inet->cmsg_flags)
564 ip_cmsg_recv(msg, skb);
565 if (flags & MSG_TRUNC)
566 copied = skb->len;
567done:
568 skb_free_datagram(sk, skb);
569out:
570 if (err) {
571 lsk->rx_errors++;
572 return err;
573 }
574
575 lsk->rx_packets++;
576 lsk->rx_bytes += copied;
577
578 return copied;
579}
580
/* Transport protocol definition for IPPROTO_L2TP sockets; generic
 * inet/udp helpers are reused where L2TP needs no special handling.
 */
581struct proto l2tp_ip_prot = {
582 .name = "L2TP/IP",
583 .owner = THIS_MODULE,
584 .init = l2tp_ip_open,
585 .close = l2tp_ip_close,
586 .bind = l2tp_ip_bind,
587 .connect = l2tp_ip_connect,
588 .disconnect = udp_disconnect,
589 .ioctl = udp_ioctl,
590 .destroy = l2tp_ip_destroy_sock,
591 .setsockopt = ip_setsockopt,
592 .getsockopt = ip_getsockopt,
593 .sendmsg = l2tp_ip_sendmsg,
594 .recvmsg = l2tp_ip_recvmsg,
595 .backlog_rcv = l2tp_ip_backlog_recv,
596 .hash = inet_hash,
597 .unhash = inet_unhash,
598 .obj_size = sizeof(struct l2tp_ip_sock),
599#ifdef CONFIG_COMPAT
600 .compat_setsockopt = compat_ip_setsockopt,
601 .compat_getsockopt = compat_ip_getsockopt,
602#endif
603};
604
/* Socket-layer ops: mostly generic inet datagram handlers; only
 * getname is L2TP-specific.  accept/listen/mmap/sendpage are
 * explicitly unsupported.
 */
605static const struct proto_ops l2tp_ip_ops = {
606 .family = PF_INET,
607 .owner = THIS_MODULE,
608 .release = inet_release,
609 .bind = inet_bind,
610 .connect = inet_dgram_connect,
611 .socketpair = sock_no_socketpair,
612 .accept = sock_no_accept,
613 .getname = l2tp_ip_getname,
614 .poll = datagram_poll,
615 .ioctl = inet_ioctl,
616 .listen = sock_no_listen,
617 .shutdown = inet_shutdown,
618 .setsockopt = sock_common_setsockopt,
619 .getsockopt = sock_common_getsockopt,
620 .sendmsg = inet_sendmsg,
621 .recvmsg = sock_common_recvmsg,
622 .mmap = sock_no_mmap,
623 .sendpage = sock_no_sendpage,
624#ifdef CONFIG_COMPAT
625 .compat_setsockopt = compat_sock_common_setsockopt,
626 .compat_getsockopt = compat_sock_common_getsockopt,
627#endif
628};
629
/* Registers SOCK_DGRAM/IPPROTO_L2TP with the inet socket layer,
 * tying the proto and proto_ops defined above together.
 */
630static struct inet_protosw l2tp_ip_protosw = {
631 .type = SOCK_DGRAM,
632 .protocol = IPPROTO_L2TP,
633 .prot = &l2tp_ip_prot,
634 .ops = &l2tp_ip_ops,
635 .no_check = 0,
636};
637
/* IPv4 protocol handler: delivers inbound IPPROTO_L2TP packets to
 * l2tp_ip_recv().
 */
638static struct net_protocol l2tp_ip_protocol __read_mostly = {
639 .handler = l2tp_ip_recv,
640};
641
/* Module init: register the proto, hook IPPROTO_L2TP input, then
 * expose the socket type.  Unwinds the proto registration if the
 * protocol hook fails.
 */
642static int __init l2tp_ip_init(void)
643{
644 int err;
645
646 printk(KERN_INFO "L2TP IP encapsulation support (L2TPv3)\n");
647
648 err = proto_register(&l2tp_ip_prot, 1);
649 if (err != 0)
650 goto out;
651
652 err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
653 if (err)
654 goto out1;
655
656 inet_register_protosw(&l2tp_ip_protosw);
657 return 0;
658
659out1:
660 proto_unregister(&l2tp_ip_prot);
661out:
662 return err;
663}
664
/* Module exit: unregister in reverse order of l2tp_ip_init(). */
665static void __exit l2tp_ip_exit(void)
666{
667 inet_unregister_protosw(&l2tp_ip_protosw);
668 inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
669 proto_unregister(&l2tp_ip_prot);
670}
671
672module_init(l2tp_ip_init);
673module_exit(l2tp_ip_exit);
674
675MODULE_LICENSE("GPL");
676MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
677MODULE_DESCRIPTION("L2TP over IP");
678MODULE_VERSION("1.0");
679MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, SOCK_DGRAM, IPPROTO_L2TP);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
new file mode 100644
index 000000000000..4c1e540732d7
--- /dev/null
+++ b/net/l2tp/l2tp_netlink.c
@@ -0,0 +1,840 @@
1/*
2 * L2TP netlink layer, for management
3 *
4 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5 *
6 * Partly based on the IrDA netlink implementation
7 * (see net/irda/irnetlink.c) which is:
8 * Copyright (c) 2007 Samuel Ortiz <samuel@sortiz.org>
9 * which is in turn partly based on the wireless netlink code:
10 * Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <net/sock.h>
18#include <net/genetlink.h>
19#include <net/udp.h>
20#include <linux/in.h>
21#include <linux/udp.h>
22#include <linux/socket.h>
23#include <linux/module.h>
24#include <linux/list.h>
25#include <net/net_namespace.h>
26
27#include <linux/l2tp.h>
28
29#include "l2tp_core.h"
30
31
/* Generic netlink family for L2TP management; the family id is
 * allocated dynamically at registration time.
 */
32static struct genl_family l2tp_nl_family = {
33 .id = GENL_ID_GENERATE,
34 .name = L2TP_GENL_NAME,
35 .version = L2TP_GENL_VERSION,
36 .hdrsize = 0,
37 .maxattr = L2TP_ATTR_MAX,
38};
39
40/* Accessed under genl lock */
/* Per-pseudowire-type create/delete callbacks, registered by the
 * pseudowire modules (e.g. l2tp_eth); indexed by L2TP_PWTYPE_*.
 */
41static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
42
/* Resolve the session a netlink request refers to, either by
 * interface name or by (tunnel id, session id) pair.  Returns NULL if
 * the attributes are absent or nothing matches.
 */
43static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
44{
45 u32 tunnel_id;
46 u32 session_id;
47 char *ifname;
48 struct l2tp_tunnel *tunnel;
49 struct l2tp_session *session = NULL;
50 struct net *net = genl_info_net(info);
51
52 if (info->attrs[L2TP_ATTR_IFNAME]) {
53 ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
54 session = l2tp_session_find_by_ifname(net, ifname);
55 } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
56 (info->attrs[L2TP_ATTR_CONN_ID])) {
57 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
58 session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
59 tunnel = l2tp_tunnel_find(net, tunnel_id);
60 if (tunnel)
61 session = l2tp_session_find(net, tunnel, session_id);
62 }
63
64 return session;
65}
66
67static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
68{
69 struct sk_buff *msg;
70 void *hdr;
71 int ret = -ENOBUFS;
72
73 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
74 if (!msg) {
75 ret = -ENOMEM;
76 goto out;
77 }
78
79 hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
80 &l2tp_nl_family, 0, L2TP_CMD_NOOP);
81 if (IS_ERR(hdr)) {
82 ret = PTR_ERR(hdr);
83 goto err_out;
84 }
85
86 genlmsg_end(msg, hdr);
87
88 return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
89
90err_out:
91 nlmsg_free(msg);
92
93out:
94 return ret;
95}
96
/* L2TP_CMD_TUNNEL_CREATE handler: validate mandatory attributes
 * (conn id, peer conn id, protocol version, encap type), build a
 * tunnel config and create the tunnel.  The tunnel either wraps an
 * existing userspace socket fd or, if no fd is given, a kernel socket
 * built from the supplied addresses/ports.
 */
97static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info)
98{
99 u32 tunnel_id;
100 u32 peer_tunnel_id;
101 int proto_version;
102 int fd;
103 int ret = 0;
104 struct l2tp_tunnel_cfg cfg = { 0, };
105 struct l2tp_tunnel *tunnel;
106 struct net *net = genl_info_net(info);
107
108 if (!info->attrs[L2TP_ATTR_CONN_ID]) {
109 ret = -EINVAL;
110 goto out;
111 }
112 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
113
114 if (!info->attrs[L2TP_ATTR_PEER_CONN_ID]) {
115 ret = -EINVAL;
116 goto out;
117 }
118 peer_tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_CONN_ID]);
119
120 if (!info->attrs[L2TP_ATTR_PROTO_VERSION]) {
121 ret = -EINVAL;
122 goto out;
123 }
124 proto_version = nla_get_u8(info->attrs[L2TP_ATTR_PROTO_VERSION]);
125
126 if (!info->attrs[L2TP_ATTR_ENCAP_TYPE]) {
127 ret = -EINVAL;
128 goto out;
129 }
130 cfg.encap = nla_get_u16(info->attrs[L2TP_ATTR_ENCAP_TYPE]);
131
/* fd == -1 requests a kernel-managed tunnel socket. */
132 fd = -1;
133 if (info->attrs[L2TP_ATTR_FD]) {
134 fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]);
135 } else {
136 if (info->attrs[L2TP_ATTR_IP_SADDR])
137 cfg.local_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_SADDR]);
138 if (info->attrs[L2TP_ATTR_IP_DADDR])
139 cfg.peer_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_DADDR]);
140 if (info->attrs[L2TP_ATTR_UDP_SPORT])
141 cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]);
142 if (info->attrs[L2TP_ATTR_UDP_DPORT])
143 cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]);
144 if (info->attrs[L2TP_ATTR_UDP_CSUM])
145 cfg.use_udp_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_CSUM]);
146 }
147
148 if (info->attrs[L2TP_ATTR_DEBUG])
149 cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
150
151 tunnel = l2tp_tunnel_find(net, tunnel_id);
152 if (tunnel != NULL) {
153 ret = -EEXIST;
154 goto out;
155 }
156
/* Unknown encap types fall through with -EINVAL. */
157 ret = -EINVAL;
158 switch (cfg.encap) {
159 case L2TP_ENCAPTYPE_UDP:
160 case L2TP_ENCAPTYPE_IP:
161 ret = l2tp_tunnel_create(net, fd, proto_version, tunnel_id,
162 peer_tunnel_id, &cfg, &tunnel);
163 break;
164 }
165
166out:
167 return ret;
168}
169
/* L2TP_CMD_TUNNEL_DELETE handler: look the tunnel up by conn id and
 * delete it.  The delete result is deliberately ignored; a missing
 * attribute yields -EINVAL, an unknown tunnel -ENODEV.
 */
170static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info)
171{
172 struct l2tp_tunnel *tunnel;
173 u32 tunnel_id;
174 int ret = 0;
175 struct net *net = genl_info_net(info);
176
177 if (!info->attrs[L2TP_ATTR_CONN_ID]) {
178 ret = -EINVAL;
179 goto out;
180 }
181 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
182
183 tunnel = l2tp_tunnel_find(net, tunnel_id);
184 if (tunnel == NULL) {
185 ret = -ENODEV;
186 goto out;
187 }
188
189 (void) l2tp_tunnel_delete(tunnel);
190
191out:
192 return ret;
193}
194
/* L2TP_CMD_TUNNEL_MODIFY handler: currently only the debug mask can
 * be changed on an existing tunnel.
 */
195static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info)
196{
197 struct l2tp_tunnel *tunnel;
198 u32 tunnel_id;
199 int ret = 0;
200 struct net *net = genl_info_net(info);
201
202 if (!info->attrs[L2TP_ATTR_CONN_ID]) {
203 ret = -EINVAL;
204 goto out;
205 }
206 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
207
208 tunnel = l2tp_tunnel_find(net, tunnel_id);
209 if (tunnel == NULL) {
210 ret = -ENODEV;
211 goto out;
212 }
213
214 if (info->attrs[L2TP_ATTR_DEBUG])
215 tunnel->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
216
217out:
218 return ret;
219}
220
221static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
222 struct l2tp_tunnel *tunnel)
223{
224 void *hdr;
225 struct nlattr *nest;
226 struct sock *sk = NULL;
227 struct inet_sock *inet;
228
229 hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
230 L2TP_CMD_TUNNEL_GET);
231 if (IS_ERR(hdr))
232 return PTR_ERR(hdr);
233
234 NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version);
235 NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
236 NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
237 NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, tunnel->debug);
238 NLA_PUT_U16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap);
239
240 nest = nla_nest_start(skb, L2TP_ATTR_STATS);
241 if (nest == NULL)
242 goto nla_put_failure;
243
244 NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets);
245 NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes);
246 NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors);
247 NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets);
248 NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes);
249 NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, tunnel->stats.rx_seq_discards);
250 NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, tunnel->stats.rx_oos_packets);
251 NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors);
252 nla_nest_end(skb, nest);
253
254 sk = tunnel->sock;
255 if (!sk)
256 goto out;
257
258 inet = inet_sk(sk);
259
260 switch (tunnel->encap) {
261 case L2TP_ENCAPTYPE_UDP:
262 NLA_PUT_U16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport));
263 NLA_PUT_U16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport));
264 NLA_PUT_U8(skb, L2TP_ATTR_UDP_CSUM, (sk->sk_no_check != UDP_CSUM_NOXMIT));
265 /* NOBREAK */
266 case L2TP_ENCAPTYPE_IP:
267 NLA_PUT_BE32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr);
268 NLA_PUT_BE32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr);
269 break;
270 }
271
272out:
273 return genlmsg_end(skb, hdr);
274
275nla_put_failure:
276 genlmsg_cancel(skb, hdr);
277 return -1;
278}
279
/* L2TP_CMD_TUNNEL_GET handler: build and unicast a single-tunnel
 * reply to the requesting socket.
 */
280static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
281{
282 struct l2tp_tunnel *tunnel;
283 struct sk_buff *msg;
284 u32 tunnel_id;
285 int ret = -ENOBUFS;
286 struct net *net = genl_info_net(info);
287
288 if (!info->attrs[L2TP_ATTR_CONN_ID]) {
289 ret = -EINVAL;
290 goto out;
291 }
292
293 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
294
295 tunnel = l2tp_tunnel_find(net, tunnel_id);
296 if (tunnel == NULL) {
297 ret = -ENODEV;
298 goto out;
299 }
300
301 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
302 if (!msg) {
303 ret = -ENOMEM;
304 goto out;
305 }
306
307 ret = l2tp_nl_tunnel_send(msg, info->snd_pid, info->snd_seq,
308 NLM_F_ACK, tunnel);
309 if (ret < 0)
310 goto err_out;
311
312 return genlmsg_unicast(net, msg, info->snd_pid);
313
314err_out:
315 nlmsg_free(msg);
316
317out:
318 return ret;
319}
320
/* TUNNEL_GET dump handler: emit one message per tunnel, resuming at
 * the index saved in cb->args[0] when the reply skb fills up.
 */
321static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback *cb)
322{
323 int ti = cb->args[0];
324 struct l2tp_tunnel *tunnel;
325 struct net *net = sock_net(skb->sk);
326
327 for (;;) {
328 tunnel = l2tp_tunnel_find_nth(net, ti);
329 if (tunnel == NULL)
330 goto out;
331
/* <= 0 means the skb is full (or an error); stop and resume later. */
332 if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).pid,
333 cb->nlh->nlmsg_seq, NLM_F_MULTI,
334 tunnel) <= 0)
335 goto out;
336
337 ti++;
338 }
339
340out:
341 cb->args[0] = ti;
342
343 return skb->len;
344}
345
/* L2TP_CMD_SESSION_CREATE handler: validate mandatory attributes
 * (tunnel id, session id, peer session id, pseudowire type), gather
 * optional session config (L2TPv3-only parameters are only read when
 * the tunnel version is > 2), then dispatch to the registered
 * pseudowire-type session_create callback.
 */
346static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *info)
347{
348 u32 tunnel_id = 0;
349 u32 session_id;
350 u32 peer_session_id;
351 int ret = 0;
352 struct l2tp_tunnel *tunnel;
353 struct l2tp_session *session;
354 struct l2tp_session_cfg cfg = { 0, };
355 struct net *net = genl_info_net(info);
356
357 if (!info->attrs[L2TP_ATTR_CONN_ID]) {
358 ret = -EINVAL;
359 goto out;
360 }
361 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
362 tunnel = l2tp_tunnel_find(net, tunnel_id);
363 if (!tunnel) {
364 ret = -ENODEV;
365 goto out;
366 }
367
368 if (!info->attrs[L2TP_ATTR_SESSION_ID]) {
369 ret = -EINVAL;
370 goto out;
371 }
372 session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
373 session = l2tp_session_find(net, tunnel, session_id);
374 if (session) {
375 ret = -EEXIST;
376 goto out;
377 }
378
379 if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) {
380 ret = -EINVAL;
381 goto out;
382 }
383 peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]);
384
385 if (!info->attrs[L2TP_ATTR_PW_TYPE]) {
386 ret = -EINVAL;
387 goto out;
388 }
389 cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]);
390 if (cfg.pw_type >= __L2TP_PWTYPE_MAX) {
391 ret = -EINVAL;
392 goto out;
393 }
394
/* L2TPv3-only session parameters. */
395 if (tunnel->version > 2) {
396 if (info->attrs[L2TP_ATTR_OFFSET])
397 cfg.offset = nla_get_u16(info->attrs[L2TP_ATTR_OFFSET]);
398
399 if (info->attrs[L2TP_ATTR_DATA_SEQ])
400 cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);
401
402 cfg.l2specific_type = L2TP_L2SPECTYPE_DEFAULT;
403 if (info->attrs[L2TP_ATTR_L2SPEC_TYPE])
404 cfg.l2specific_type = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_TYPE]);
405
406 cfg.l2specific_len = 4;
407 if (info->attrs[L2TP_ATTR_L2SPEC_LEN])
408 cfg.l2specific_len = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_LEN]);
409
/* Cookies are at most 64 bits (RFC 3931). */
410 if (info->attrs[L2TP_ATTR_COOKIE]) {
411 u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]);
412 if (len > 8) {
413 ret = -EINVAL;
414 goto out;
415 }
416 cfg.cookie_len = len;
417 memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len);
418 }
419 if (info->attrs[L2TP_ATTR_PEER_COOKIE]) {
420 u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]);
421 if (len > 8) {
422 ret = -EINVAL;
423 goto out;
424 }
425 cfg.peer_cookie_len = len;
426 memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len);
427 }
428 if (info->attrs[L2TP_ATTR_IFNAME])
429 cfg.ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
430
431 if (info->attrs[L2TP_ATTR_VLAN_ID])
432 cfg.vlan_id = nla_get_u16(info->attrs[L2TP_ATTR_VLAN_ID]);
433 }
434
435 if (info->attrs[L2TP_ATTR_DEBUG])
436 cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
437
438 if (info->attrs[L2TP_ATTR_RECV_SEQ])
439 cfg.recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);
440
441 if (info->attrs[L2TP_ATTR_SEND_SEQ])
442 cfg.send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]);
443
444 if (info->attrs[L2TP_ATTR_LNS_MODE])
445 cfg.lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]);
446
447 if (info->attrs[L2TP_ATTR_RECV_TIMEOUT])
448 cfg.reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]);
449
450 if (info->attrs[L2TP_ATTR_MTU])
451 cfg.mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]);
452
453 if (info->attrs[L2TP_ATTR_MRU])
454 cfg.mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]);
455
456 if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) ||
457 (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) {
458 ret = -EPROTONOSUPPORT;
459 goto out;
460 }
461
462 /* Check that pseudowire-specific params are present */
463 switch (cfg.pw_type) {
464 case L2TP_PWTYPE_NONE:
465 break;
466 case L2TP_PWTYPE_ETH_VLAN:
467 if (!info->attrs[L2TP_ATTR_VLAN_ID]) {
468 ret = -EINVAL;
469 goto out;
470 }
471 break;
472 case L2TP_PWTYPE_ETH:
473 break;
474 case L2TP_PWTYPE_PPP:
475 case L2TP_PWTYPE_PPP_AC:
476 break;
477 case L2TP_PWTYPE_IP:
478 default:
479 ret = -EPROTONOSUPPORT;
480 break;
481 }
482
/* NOTE(review): session_create non-NULL was already verified above,
 * so this re-check is redundant (kept for safety).
 */
483 ret = -EPROTONOSUPPORT;
484 if (l2tp_nl_cmd_ops[cfg.pw_type]->session_create)
485 ret = (*l2tp_nl_cmd_ops[cfg.pw_type]->session_create)(net, tunnel_id,
486 session_id, peer_session_id, &cfg);
487
488out:
489 return ret;
490}
491
/* L2TP_CMD_SESSION_DELETE handler: resolve the session from the
 * request and dispatch to the pseudowire-specific delete callback, if
 * one is registered for its type.
 */
492static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *info)
493{
494 int ret = 0;
495 struct l2tp_session *session;
496 u16 pw_type;
497
498 session = l2tp_nl_session_find(info);
499 if (session == NULL) {
500 ret = -ENODEV;
501 goto out;
502 }
503
504 pw_type = session->pwtype;
505 if (pw_type < __L2TP_PWTYPE_MAX)
506 if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
507 ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session);
508
509out:
510 return ret;
511}
512
/* L2TP_CMD_SESSION_MODIFY handler: update whichever mutable session
 * parameters are present in the request (debug mask, sequencing
 * options, LNS mode, reorder timeout, MTU/MRU).
 */
513static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *info)
514{
515 int ret = 0;
516 struct l2tp_session *session;
517
518 session = l2tp_nl_session_find(info);
519 if (session == NULL) {
520 ret = -ENODEV;
521 goto out;
522 }
523
524 if (info->attrs[L2TP_ATTR_DEBUG])
525 session->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
526
527 if (info->attrs[L2TP_ATTR_DATA_SEQ])
528 session->data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);
529
530 if (info->attrs[L2TP_ATTR_RECV_SEQ])
531 session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);
532
533 if (info->attrs[L2TP_ATTR_SEND_SEQ])
534 session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]);
535
536 if (info->attrs[L2TP_ATTR_LNS_MODE])
537 session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]);
538
539 if (info->attrs[L2TP_ATTR_RECV_TIMEOUT])
540 session->reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]);
541
542 if (info->attrs[L2TP_ATTR_MTU])
543 session->mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]);
544
545 if (info->attrs[L2TP_ATTR_MRU])
546 session->mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]);
547
548out:
549 return ret;
550}
551
552static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
553 struct l2tp_session *session)
554{
555 void *hdr;
556 struct nlattr *nest;
557 struct l2tp_tunnel *tunnel = session->tunnel;
558 struct sock *sk = NULL;
559
560 sk = tunnel->sock;
561
562 hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
563 if (IS_ERR(hdr))
564 return PTR_ERR(hdr);
565
566 NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
567 NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id);
568 NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
569 NLA_PUT_U32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id);
570 NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, session->debug);
571 NLA_PUT_U16(skb, L2TP_ATTR_PW_TYPE, session->pwtype);
572 NLA_PUT_U16(skb, L2TP_ATTR_MTU, session->mtu);
573 if (session->mru)
574 NLA_PUT_U16(skb, L2TP_ATTR_MRU, session->mru);
575
576 if (session->ifname && session->ifname[0])
577 NLA_PUT_STRING(skb, L2TP_ATTR_IFNAME, session->ifname);
578 if (session->cookie_len)
579 NLA_PUT(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0]);
580 if (session->peer_cookie_len)
581 NLA_PUT(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, &session->peer_cookie[0]);
582 NLA_PUT_U8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq);
583 NLA_PUT_U8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq);
584 NLA_PUT_U8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode);
585#ifdef CONFIG_XFRM
586 if ((sk) && (sk->sk_policy[0] || sk->sk_policy[1]))
587 NLA_PUT_U8(skb, L2TP_ATTR_USING_IPSEC, 1);
588#endif
589 if (session->reorder_timeout)
590 NLA_PUT_MSECS(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout);
591
592 nest = nla_nest_start(skb, L2TP_ATTR_STATS);
593 if (nest == NULL)
594 goto nla_put_failure;
595 NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets);
596 NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes);
597 NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors);
598 NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets);
599 NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes);
600 NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, session->stats.rx_seq_discards);
601 NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, session->stats.rx_oos_packets);
602 NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors);
603 nla_nest_end(skb, nest);
604
605 return genlmsg_end(skb, hdr);
606
607 nla_put_failure:
608 genlmsg_cancel(skb, hdr);
609 return -1;
610}
611
612static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
613{
614 struct l2tp_session *session;
615 struct sk_buff *msg;
616 int ret;
617
618 session = l2tp_nl_session_find(info);
619 if (session == NULL) {
620 ret = -ENODEV;
621 goto out;
622 }
623
624 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
625 if (!msg) {
626 ret = -ENOMEM;
627 goto out;
628 }
629
630 ret = l2tp_nl_session_send(msg, info->snd_pid, info->snd_seq,
631 0, session);
632 if (ret < 0)
633 goto err_out;
634
635 return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
636
637err_out:
638 nlmsg_free(msg);
639
640out:
641 return ret;
642}
643
644static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback *cb)
645{
646 struct net *net = sock_net(skb->sk);
647 struct l2tp_session *session;
648 struct l2tp_tunnel *tunnel = NULL;
649 int ti = cb->args[0];
650 int si = cb->args[1];
651
652 for (;;) {
653 if (tunnel == NULL) {
654 tunnel = l2tp_tunnel_find_nth(net, ti);
655 if (tunnel == NULL)
656 goto out;
657 }
658
659 session = l2tp_session_find_nth(tunnel, si);
660 if (session == NULL) {
661 ti++;
662 tunnel = NULL;
663 si = 0;
664 continue;
665 }
666
667 if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).pid,
668 cb->nlh->nlmsg_seq, NLM_F_MULTI,
669 session) <= 0)
670 break;
671
672 si++;
673 }
674
675out:
676 cb->args[0] = ti;
677 cb->args[1] = si;
678
679 return skb->len;
680}
681
/* Netlink attribute validation policy for all L2TP genl commands.
 * Attributes absent from a request are simply not applied by the
 * handlers above.
 */
static struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = {
	[L2TP_ATTR_NONE]		= { .type = NLA_UNSPEC, },
	[L2TP_ATTR_PW_TYPE]		= { .type = NLA_U16, },
	[L2TP_ATTR_ENCAP_TYPE]		= { .type = NLA_U16, },
	[L2TP_ATTR_OFFSET]		= { .type = NLA_U16, },
	[L2TP_ATTR_DATA_SEQ]		= { .type = NLA_U8, },
	[L2TP_ATTR_L2SPEC_TYPE]		= { .type = NLA_U8, },
	[L2TP_ATTR_L2SPEC_LEN]		= { .type = NLA_U8, },
	[L2TP_ATTR_PROTO_VERSION]	= { .type = NLA_U8, },
	[L2TP_ATTR_CONN_ID]		= { .type = NLA_U32, },
	[L2TP_ATTR_PEER_CONN_ID]	= { .type = NLA_U32, },
	[L2TP_ATTR_SESSION_ID]		= { .type = NLA_U32, },
	[L2TP_ATTR_PEER_SESSION_ID]	= { .type = NLA_U32, },
	[L2TP_ATTR_UDP_CSUM]		= { .type = NLA_U8, },
	[L2TP_ATTR_VLAN_ID]		= { .type = NLA_U16, },
	[L2TP_ATTR_DEBUG]		= { .type = NLA_U32, },
	[L2TP_ATTR_RECV_SEQ]		= { .type = NLA_U8, },
	[L2TP_ATTR_SEND_SEQ]		= { .type = NLA_U8, },
	[L2TP_ATTR_LNS_MODE]		= { .type = NLA_U8, },
	[L2TP_ATTR_USING_IPSEC]		= { .type = NLA_U8, },
	[L2TP_ATTR_RECV_TIMEOUT]	= { .type = NLA_MSECS, },
	[L2TP_ATTR_FD]			= { .type = NLA_U32, },
	[L2TP_ATTR_IP_SADDR]		= { .type = NLA_U32, },
	[L2TP_ATTR_IP_DADDR]		= { .type = NLA_U32, },
	[L2TP_ATTR_UDP_SPORT]		= { .type = NLA_U16, },
	[L2TP_ATTR_UDP_DPORT]		= { .type = NLA_U16, },
	[L2TP_ATTR_MTU]			= { .type = NLA_U16, },
	[L2TP_ATTR_MRU]			= { .type = NLA_U16, },
	[L2TP_ATTR_STATS]		= { .type = NLA_NESTED, },
	[L2TP_ATTR_IFNAME] = {
		.type = NLA_NUL_STRING,
		.len = IFNAMSIZ - 1,	/* room for the trailing NUL */
	},
	[L2TP_ATTR_COOKIE] = {
		.type = NLA_BINARY,
		.len = 8,		/* L2TP cookies are at most 8 bytes */
	},
	[L2TP_ATTR_PEER_COOKIE] = {
		.type = NLA_BINARY,
		.len = 8,
	},
};
724
/* Generic netlink command table. All commands except NOOP require
 * CAP_NET_ADMIN (GENL_ADMIN_PERM); the GET commands also provide dump
 * handlers for iteration.
 */
static struct genl_ops l2tp_nl_ops[] = {
	{
		.cmd = L2TP_CMD_NOOP,
		.doit = l2tp_nl_cmd_noop,
		.policy = l2tp_nl_policy,
		/* can be retrieved by unprivileged users */
	},
	{
		.cmd = L2TP_CMD_TUNNEL_CREATE,
		.doit = l2tp_nl_cmd_tunnel_create,
		.policy = l2tp_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = L2TP_CMD_TUNNEL_DELETE,
		.doit = l2tp_nl_cmd_tunnel_delete,
		.policy = l2tp_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = L2TP_CMD_TUNNEL_MODIFY,
		.doit = l2tp_nl_cmd_tunnel_modify,
		.policy = l2tp_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = L2TP_CMD_TUNNEL_GET,
		.doit = l2tp_nl_cmd_tunnel_get,
		.dumpit = l2tp_nl_cmd_tunnel_dump,
		.policy = l2tp_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = L2TP_CMD_SESSION_CREATE,
		.doit = l2tp_nl_cmd_session_create,
		.policy = l2tp_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = L2TP_CMD_SESSION_DELETE,
		.doit = l2tp_nl_cmd_session_delete,
		.policy = l2tp_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = L2TP_CMD_SESSION_MODIFY,
		.doit = l2tp_nl_cmd_session_modify,
		.policy = l2tp_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = L2TP_CMD_SESSION_GET,
		.doit = l2tp_nl_cmd_session_get,
		.dumpit = l2tp_nl_cmd_session_dump,
		.policy = l2tp_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};
783
784int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops)
785{
786 int ret;
787
788 ret = -EINVAL;
789 if (pw_type >= __L2TP_PWTYPE_MAX)
790 goto err;
791
792 genl_lock();
793 ret = -EBUSY;
794 if (l2tp_nl_cmd_ops[pw_type])
795 goto out;
796
797 l2tp_nl_cmd_ops[pw_type] = ops;
798
799out:
800 genl_unlock();
801err:
802 return 0;
803}
804EXPORT_SYMBOL_GPL(l2tp_nl_register_ops);
805
806void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type)
807{
808 if (pw_type < __L2TP_PWTYPE_MAX) {
809 genl_lock();
810 l2tp_nl_cmd_ops[pw_type] = NULL;
811 genl_unlock();
812 }
813}
814EXPORT_SYMBOL_GPL(l2tp_nl_unregister_ops);
815
816static int l2tp_nl_init(void)
817{
818 int err;
819
820 printk(KERN_INFO "L2TP netlink interface\n");
821 err = genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops,
822 ARRAY_SIZE(l2tp_nl_ops));
823
824 return err;
825}
826
/* Module exit: unregister the l2tp generic netlink family. */
static void l2tp_nl_cleanup(void)
{
	genl_unregister_family(&l2tp_nl_family);
}
831
module_init(l2tp_nl_init);
module_exit(l2tp_nl_cleanup);

MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP netlink");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
/* Allow auto-loading of this module via the generic netlink family name. */
MODULE_ALIAS("net-pf-" __stringify(PF_NETLINK) "-proto-" \
	     __stringify(NETLINK_GENERIC) "-type-" "l2tp");
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
new file mode 100644
index 000000000000..90d82b3f2889
--- /dev/null
+++ b/net/l2tp/l2tp_ppp.c
@@ -0,0 +1,1837 @@
1/*****************************************************************************
2 * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets
3 *
4 * PPPoX --- Generic PPP encapsulation socket family
5 * PPPoL2TP --- PPP over L2TP (RFC 2661)
6 *
7 * Version: 2.0.0
8 *
9 * Authors: James Chapman (jchapman@katalix.com)
10 *
11 * Based on original work by Martijn van Oosterhout <kleptog@svana.org>
12 *
13 * License:
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 *
19 */
20
21/* This driver handles only L2TP data frames; control frames are handled by a
22 * userspace application.
23 *
24 * To send data in an L2TP session, userspace opens a PPPoL2TP socket and
25 * attaches it to a bound UDP socket with local tunnel_id / session_id and
26 * peer tunnel_id / session_id set. Data can then be sent or received using
27 * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket
28 * can be read or modified using ioctl() or [gs]etsockopt() calls.
29 *
30 * When a PPPoL2TP socket is connected with local and peer session_id values
31 * zero, the socket is treated as a special tunnel management socket.
32 *
33 * Here's example userspace code to create a socket for sending/receiving data
34 * over an L2TP session:-
35 *
36 * struct sockaddr_pppol2tp sax;
37 * int fd;
38 * int session_fd;
39 *
40 * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
41 *
42 * sax.sa_family = AF_PPPOX;
43 * sax.sa_protocol = PX_PROTO_OL2TP;
44 * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket
45 * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
46 * sax.pppol2tp.addr.sin_port = addr->sin_port;
47 * sax.pppol2tp.addr.sin_family = AF_INET;
48 * sax.pppol2tp.s_tunnel = tunnel_id;
49 * sax.pppol2tp.s_session = session_id;
50 * sax.pppol2tp.d_tunnel = peer_tunnel_id;
51 * sax.pppol2tp.d_session = peer_session_id;
52 *
53 * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax));
54 *
55 * A pppd plugin that allows PPP traffic to be carried over L2TP using
56 * this driver is available from the OpenL2TP project at
57 * http://openl2tp.sourceforge.net.
58 */
59
60#include <linux/module.h>
61#include <linux/string.h>
62#include <linux/list.h>
63#include <linux/uaccess.h>
64
65#include <linux/kernel.h>
66#include <linux/spinlock.h>
67#include <linux/kthread.h>
68#include <linux/sched.h>
69#include <linux/slab.h>
70#include <linux/errno.h>
71#include <linux/jiffies.h>
72
73#include <linux/netdevice.h>
74#include <linux/net.h>
75#include <linux/inetdevice.h>
76#include <linux/skbuff.h>
77#include <linux/init.h>
78#include <linux/ip.h>
79#include <linux/udp.h>
80#include <linux/if_pppox.h>
81#include <linux/if_pppol2tp.h>
82#include <net/sock.h>
83#include <linux/ppp_channel.h>
84#include <linux/ppp_defs.h>
85#include <linux/if_ppp.h>
86#include <linux/file.h>
87#include <linux/hash.h>
88#include <linux/sort.h>
89#include <linux/proc_fs.h>
90#include <linux/l2tp.h>
91#include <linux/nsproxy.h>
92#include <net/net_namespace.h>
93#include <net/netns/generic.h>
94#include <net/dst.h>
95#include <net/ip.h>
96#include <net/udp.h>
97#include <net/xfrm.h>
98
99#include <asm/byteorder.h>
100#include <asm/atomic.h>
101
102#include "l2tp_core.h"
103
#define PPPOL2TP_DRV_VERSION	"V2.0"

/* Space for UDP, L2TP and PPP headers */
#define PPPOL2TP_HEADER_OVERHEAD	40

/* Conditional debug printk: emit only when the bit _type is set in the
 * session/tunnel debug mask _mask.
 */
#define PRINTK(_mask, _type, _lvl, _fmt, args...)			\
	do {								\
		if ((_mask) & (_type))					\
			printk(_lvl "PPPOL2TP: " _fmt, ##args);		\
	} while (0)

/* Number of bytes to build transmit L2TP headers.
 * Unfortunately the size is different depending on whether sequence numbers
 * are enabled.
 */
#define PPPOL2TP_L2TP_HDR_SIZE_SEQ		10
#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ		6

/* Private data of each session. This data lives at the end of struct
 * l2tp_session, referenced via session->priv[].
 */
struct pppol2tp_session {
	int			owner;		/* pid that opened the socket */

	struct sock		*sock;		/* Pointer to the session
						 * PPPoX socket */
	struct sock		*tunnel_sock;	/* Pointer to the tunnel UDP
						 * socket */
	int			flags;		/* accessed by PPPIOCGFLAGS.
						 * Unused. */
};
135
136static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
137
138static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL };
139static const struct proto_ops pppol2tp_ops;
140
141/* Helpers to obtain tunnel/session contexts from sockets.
142 */
143static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk)
144{
145 struct l2tp_session *session;
146
147 if (sk == NULL)
148 return NULL;
149
150 sock_hold(sk);
151 session = (struct l2tp_session *)(sk->sk_user_data);
152 if (session == NULL) {
153 sock_put(sk);
154 goto out;
155 }
156
157 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
158
159out:
160 return session;
161}
162
163/*****************************************************************************
164 * Receive data handling
165 *****************************************************************************/
166
167static int pppol2tp_recv_payload_hook(struct sk_buff *skb)
168{
169 /* Skip PPP header, if present. In testing, Microsoft L2TP clients
170 * don't send the PPP header (PPP header compression enabled), but
171 * other clients can include the header. So we cope with both cases
172 * here. The PPP header is always FF03 when using L2TP.
173 *
174 * Note that skb->data[] isn't dereferenced from a u16 ptr here since
175 * the field may be unaligned.
176 */
177 if (!pskb_may_pull(skb, 2))
178 return 1;
179
180 if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03))
181 skb_pull(skb, 2);
182
183 return 0;
184}
185
186/* Receive message. This is the recvmsg for the PPPoL2TP socket.
187 */
188static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
189 struct msghdr *msg, size_t len,
190 int flags)
191{
192 int err;
193 struct sk_buff *skb;
194 struct sock *sk = sock->sk;
195
196 err = -EIO;
197 if (sk->sk_state & PPPOX_BOUND)
198 goto end;
199
200 msg->msg_namelen = 0;
201
202 err = 0;
203 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
204 flags & MSG_DONTWAIT, &err);
205 if (!skb)
206 goto end;
207
208 if (len > skb->len)
209 len = skb->len;
210 else if (len < skb->len)
211 msg->msg_flags |= MSG_TRUNC;
212
213 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
214 if (likely(err == 0))
215 err = len;
216
217 kfree_skb(skb);
218end:
219 return err;
220}
221
/* Session receive callback (installed as session->recv_skb by
 * pppol2tp_connect()). Delivers a reassembled data frame either to the
 * PPP unit or discards it. Always consumes @skb.
 */
static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
{
	struct pppol2tp_session *ps = l2tp_session_priv(session);
	struct sock *sk = NULL;

	/* If the socket is bound, send it in to PPP's input queue. Otherwise
	 * queue it on the session socket.
	 */
	sk = ps->sock;
	if (sk == NULL)
		goto no_sock;

	if (sk->sk_state & PPPOX_BOUND) {
		struct pppox_sock *po;
		PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
		       "%s: recv %d byte data frame, passing to ppp\n",
		       session->name, data_len);

		/* We need to forget all info related to the L2TP packet
		 * gathered in the skb as we are going to reuse the same
		 * skb for the inner packet.
		 * Namely we need to:
		 * - reset xfrm (IPSec) information as it applies to
		 *   the outer L2TP packet and not to the inner one
		 * - release the dst to force a route lookup on the inner
		 *   IP packet since skb->dst currently points to the dst
		 *   of the UDP tunnel
		 * - reset netfilter information as it doesn't apply
		 *   to the inner packet either
		 */
		secpath_reset(skb);
		skb_dst_drop(skb);
		nf_reset(skb);

		po = pppox_sk(sk);
		ppp_input(&po->chan, skb);
	} else {
		PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
		       "%s: socket not bound\n", session->name);

		/* Not bound. Nothing we can do, so discard. */
		session->stats.rx_errors++;
		kfree_skb(skb);
	}

	return;

no_sock:
	PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
	       "%s: no socket\n", session->name);
	kfree_skb(skb);
}
274
275static void pppol2tp_session_sock_hold(struct l2tp_session *session)
276{
277 struct pppol2tp_session *ps = l2tp_session_priv(session);
278
279 if (ps->sock)
280 sock_hold(ps->sock);
281}
282
283static void pppol2tp_session_sock_put(struct l2tp_session *session)
284{
285 struct pppol2tp_session *ps = l2tp_session_priv(session);
286
287 if (ps->sock)
288 sock_put(ps->sock);
289}
290
291/************************************************************************
292 * Transmit handling
293 ***********************************************************************/
294
295/* This is the sendmsg for the PPPoL2TP pppol2tp_session socket. We come here
296 * when a user application does a sendmsg() on the session socket. L2TP and
297 * PPP headers must be inserted into the user's data.
298 */
299static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
300 size_t total_len)
301{
302 static const unsigned char ppph[2] = { 0xff, 0x03 };
303 struct sock *sk = sock->sk;
304 struct sk_buff *skb;
305 int error;
306 struct l2tp_session *session;
307 struct l2tp_tunnel *tunnel;
308 struct pppol2tp_session *ps;
309 int uhlen;
310
311 error = -ENOTCONN;
312 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
313 goto error;
314
315 /* Get session and tunnel contexts */
316 error = -EBADF;
317 session = pppol2tp_sock_to_session(sk);
318 if (session == NULL)
319 goto error;
320
321 ps = l2tp_session_priv(session);
322 tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
323 if (tunnel == NULL)
324 goto error_put_sess;
325
326 uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
327
328 /* Allocate a socket buffer */
329 error = -ENOMEM;
330 skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
331 uhlen + session->hdr_len +
332 sizeof(ppph) + total_len,
333 0, GFP_KERNEL);
334 if (!skb)
335 goto error_put_sess_tun;
336
337 /* Reserve space for headers. */
338 skb_reserve(skb, NET_SKB_PAD);
339 skb_reset_network_header(skb);
340 skb_reserve(skb, sizeof(struct iphdr));
341 skb_reset_transport_header(skb);
342 skb_reserve(skb, uhlen);
343
344 /* Add PPP header */
345 skb->data[0] = ppph[0];
346 skb->data[1] = ppph[1];
347 skb_put(skb, 2);
348
349 /* Copy user data into skb */
350 error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
351 if (error < 0) {
352 kfree_skb(skb);
353 goto error_put_sess_tun;
354 }
355 skb_put(skb, total_len);
356
357 l2tp_xmit_skb(session, skb, session->hdr_len);
358
359 sock_put(ps->tunnel_sock);
360
361 return error;
362
363error_put_sess_tun:
364 sock_put(ps->tunnel_sock);
365error_put_sess:
366 sock_put(sk);
367error:
368 return error;
369}
370
371/* Transmit function called by generic PPP driver. Sends PPP frame
372 * over PPPoL2TP socket.
373 *
374 * This is almost the same as pppol2tp_sendmsg(), but rather than
375 * being called with a msghdr from userspace, it is called with a skb
376 * from the kernel.
377 *
378 * The supplied skb from ppp doesn't have enough headroom for the
379 * insertion of L2TP, UDP and IP headers so we need to allocate more
380 * headroom in the skb. This will create a cloned skb. But we must be
381 * careful in the error case because the caller will expect to free
382 * the skb it supplied, not our cloned skb. So we take care to always
383 * leave the original skb unfreed if we return an error.
384 */
/* Transmit function called by generic PPP driver. Sends PPP frame
 * over PPPoL2TP socket.
 *
 * This is almost the same as pppol2tp_sendmsg(), but rather than
 * being called with a msghdr from userspace, it is called with a skb
 * from the kernel.
 *
 * The supplied skb from ppp doesn't have enough headroom for the
 * insertion of L2TP, UDP and IP headers so we need to allocate more
 * headroom in the skb. This will create a cloned skb. But we must be
 * careful in the error case because the caller will expect to free
 * the skb it supplied, not our cloned skb. So we take care to always
 * leave the original skb unfreed if we return an error.
 */
static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
	static const u8 ppph[2] = { 0xff, 0x03 };
	struct sock *sk = (struct sock *) chan->private;
	struct sock *sk_tun;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel;
	struct pppol2tp_session *ps;
	int old_headroom;
	int new_headroom;

	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
		goto abort;

	/* Get session and tunnel contexts from the socket; the session
	 * lookup takes a reference on sk that is dropped on exit.
	 */
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto abort;

	ps = l2tp_session_priv(session);
	sk_tun = ps->tunnel_sock;
	if (sk_tun == NULL)
		goto abort_put_sess;
	tunnel = l2tp_sock_to_tunnel(sk_tun);
	if (tunnel == NULL)
		goto abort_put_sess;

	/* Make room for the 2-byte PPP header; skb_cow_head() may
	 * reallocate the header, so account for any headroom change.
	 * NOTE(review): only sizeof(ppph) is requested here — presumably
	 * l2tp_xmit_skb() ensures headroom for the L2TP/UDP/IP headers
	 * itself; confirm against l2tp_core.
	 */
	old_headroom = skb_headroom(skb);
	if (skb_cow_head(skb, sizeof(ppph)))
		goto abort_put_sess_tun;

	new_headroom = skb_headroom(skb);
	skb->truesize += new_headroom - old_headroom;

	/* Setup PPP header */
	__skb_push(skb, sizeof(ppph));
	skb->data[0] = ppph[0];
	skb->data[1] = ppph[1];

	l2tp_xmit_skb(session, skb, session->hdr_len);

	sock_put(sk_tun);
	sock_put(sk);
	return 1;

abort_put_sess_tun:
	sock_put(sk_tun);
abort_put_sess:
	sock_put(sk);
abort:
	/* Free the original skb */
	kfree_skb(skb);
	return 1;
}
439
440/*****************************************************************************
441 * Session (and tunnel control) socket create/destroy.
442 *****************************************************************************/
443
444/* Called by l2tp_core when a session socket is being closed.
445 */
/* Called by l2tp_core when a session socket is being closed.
 */
static void pppol2tp_session_close(struct l2tp_session *session)
{
	struct pppol2tp_session *ps = l2tp_session_priv(session);
	struct sock *sk = ps->sock;
	struct sk_buff *skb;

	BUG_ON(session->magic != L2TP_SESSION_MAGIC);

	/* session_id 0 marks the special tunnel-management session;
	 * there is no data path to tear down for it.
	 */
	if (session->session_id == 0)
		goto out;

	if (sk != NULL) {
		lock_sock(sk);

		if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
			pppox_unbind_sock(sk);
			sk->sk_state = PPPOX_DEAD;
			sk->sk_state_change(sk);
		}

		/* Purge any queued data */
		skb_queue_purge(&sk->sk_receive_queue);
		skb_queue_purge(&sk->sk_write_queue);
		while ((skb = skb_dequeue(&session->reorder_q))) {
			kfree_skb(skb);
			/* Drop one socket reference per reorder_q entry —
			 * presumably matching pppol2tp_session_sock_hold()
			 * taken via session->ref; confirm against l2tp_core.
			 */
			sock_put(sk);
		}

		release_sock(sk);
	}

out:
	return;
}
480
481/* Really kill the session socket. (Called from sock_put() if
482 * refcnt == 0.)
483 */
484static void pppol2tp_session_destruct(struct sock *sk)
485{
486 struct l2tp_session *session;
487
488 if (sk->sk_user_data != NULL) {
489 session = sk->sk_user_data;
490 if (session == NULL)
491 goto out;
492
493 sk->sk_user_data = NULL;
494 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
495 l2tp_session_dec_refcount(session);
496 }
497
498out:
499 return;
500}
501
502/* Called when the PPPoX socket (session) is closed.
503 */
/* Called when the PPPoX socket (session) is closed.
 */
static int pppol2tp_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct l2tp_session *session;
	int error;

	if (!sk)
		return 0;

	error = -EBADF;
	lock_sock(sk);
	if (sock_flag(sk, SOCK_DEAD) != 0)
		goto error;

	pppox_unbind_sock(sk);

	/* Signal the death of the socket. */
	sk->sk_state = PPPOX_DEAD;
	sock_orphan(sk);
	sock->sk = NULL;

	/* Takes a reference on sk if a session is attached. */
	session = pppol2tp_sock_to_session(sk);

	/* Purge any queued data */
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
	if (session != NULL) {
		struct sk_buff *skb;
		while ((skb = skb_dequeue(&session->reorder_q))) {
			kfree_skb(skb);
			/* One reference per reorder_q entry — presumably
			 * matching pppol2tp_session_sock_hold(); confirm
			 * against l2tp_core.
			 */
			sock_put(sk);
		}
		/* Drop the reference taken by pppol2tp_sock_to_session(). */
		sock_put(sk);
	}

	release_sock(sk);

	/* This will delete the session context via
	 * pppol2tp_session_destruct() if the socket's refcnt drops to
	 * zero.
	 */
	sock_put(sk);

	return 0;

error:
	release_sock(sk);
	return error;
}
553
/* Protocol descriptor for PPPoL2TP sockets. obj_size covers the full
 * pppox_sock, which embeds struct sock.
 */
static struct proto pppol2tp_sk_proto = {
	.name	  = "PPPOL2TP",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct pppox_sock),
};
559
560static int pppol2tp_backlog_recv(struct sock *sk, struct sk_buff *skb)
561{
562 int rc;
563
564 rc = l2tp_udp_encap_recv(sk, skb);
565 if (rc)
566 kfree_skb(skb);
567
568 return NET_RX_SUCCESS;
569}
570
571/* socket() handler. Initialize a new struct sock.
572 */
573static int pppol2tp_create(struct net *net, struct socket *sock)
574{
575 int error = -ENOMEM;
576 struct sock *sk;
577
578 sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto);
579 if (!sk)
580 goto out;
581
582 sock_init_data(sock, sk);
583
584 sock->state = SS_UNCONNECTED;
585 sock->ops = &pppol2tp_ops;
586
587 sk->sk_backlog_rcv = pppol2tp_backlog_recv;
588 sk->sk_protocol = PX_PROTO_OL2TP;
589 sk->sk_family = PF_PPPOX;
590 sk->sk_state = PPPOX_NONE;
591 sk->sk_type = SOCK_STREAM;
592 sk->sk_destruct = pppol2tp_session_destruct;
593
594 error = 0;
595
596out:
597 return error;
598}
599
#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
/* debugfs show callback (installed as session->show): print the PPP
 * interface name associated with this session, if any.
 */
static void pppol2tp_show(struct seq_file *m, void *arg)
{
	struct l2tp_session *session = arg;
	struct pppol2tp_session *ps = l2tp_session_priv(session);

	if (ps) {
		struct pppox_sock *po = pppox_sk(ps->sock);
		if (po)
			seq_printf(m, "   interface %s\n", ppp_dev_name(&po->chan));
	}
}
#endif
613
614/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
615 */
/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
 */
static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
			    int sockaddr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
	struct sockaddr_pppol2tpv3 *sp3 = (struct sockaddr_pppol2tpv3 *) uservaddr;
	struct pppox_sock *po = pppox_sk(sk);
	struct l2tp_session *session = NULL;
	struct l2tp_tunnel *tunnel;
	struct pppol2tp_session *ps;
	struct dst_entry *dst;
	struct l2tp_session_cfg cfg = { 0, };
	int error = 0;
	u32 tunnel_id, peer_tunnel_id;
	u32 session_id, peer_session_id;
	int ver = 2;
	int fd;

	lock_sock(sk);

	/* NOTE(review): sp->sa_protocol is dereferenced before
	 * sockaddr_len is validated below — confirm callers guarantee
	 * at least that many bytes of address.
	 */
	error = -EINVAL;
	if (sp->sa_protocol != PX_PROTO_OL2TP)
		goto end;

	/* Check for already bound sockets */
	error = -EBUSY;
	if (sk->sk_state & PPPOX_CONNECTED)
		goto end;

	/* We don't supporting rebinding anyway */
	error = -EALREADY;
	if (sk->sk_user_data)
		goto end; /* socket is already attached */

	/* Get params from socket address. Handle L2TPv2 and L2TPv3;
	 * the address version is inferred from its size.
	 */
	if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) {
		fd = sp->pppol2tp.fd;
		tunnel_id = sp->pppol2tp.s_tunnel;
		peer_tunnel_id = sp->pppol2tp.d_tunnel;
		session_id = sp->pppol2tp.s_session;
		peer_session_id = sp->pppol2tp.d_session;
	} else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) {
		ver = 3;
		fd = sp3->pppol2tp.fd;
		tunnel_id = sp3->pppol2tp.s_tunnel;
		peer_tunnel_id = sp3->pppol2tp.d_tunnel;
		session_id = sp3->pppol2tp.s_session;
		peer_session_id = sp3->pppol2tp.d_session;
	} else {
		error = -EINVAL;
		goto end; /* bad socket address */
	}

	/* Don't bind if tunnel_id is 0 */
	error = -EINVAL;
	if (tunnel_id == 0)
		goto end;

	tunnel = l2tp_tunnel_find(sock_net(sk), tunnel_id);

	/* Special case: create tunnel context if session_id and
	 * peer_session_id is 0. Otherwise look up tunnel using supplied
	 * tunnel id.
	 */
	if ((session_id == 0) && (peer_session_id == 0)) {
		if (tunnel == NULL) {
			struct l2tp_tunnel_cfg tcfg = {
				.encap = L2TP_ENCAPTYPE_UDP,
				.debug = 0,
			};
			error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel);
			if (error < 0)
				goto end;
		}
	} else {
		/* Error if we can't find the tunnel */
		error = -ENOENT;
		if (tunnel == NULL)
			goto end;

		/* Error if socket is not prepped */
		if (tunnel->sock == NULL)
			goto end;
	}

	/* Install the PPP header-stripping hook once per tunnel. */
	if (tunnel->recv_payload_hook == NULL)
		tunnel->recv_payload_hook = pppol2tp_recv_payload_hook;

	if (tunnel->peer_tunnel_id == 0) {
		if (ver == 2)
			tunnel->peer_tunnel_id = sp->pppol2tp.d_tunnel;
		else
			tunnel->peer_tunnel_id = sp3->pppol2tp.d_tunnel;
	}

	/* Create session if it doesn't already exist. We handle the
	 * case where a session was previously created by the netlink
	 * interface by checking that the session doesn't already have
	 * a socket and its tunnel socket are what we expect. If any
	 * of those checks fail, return EEXIST to the caller.
	 */
	session = l2tp_session_find(sock_net(sk), tunnel, session_id);
	if (session == NULL) {
		/* Default MTU must allow space for UDP/L2TP/PPP
		 * headers.
		 */
		cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;

		/* Allocate and initialize a new session context. */
		session = l2tp_session_create(sizeof(struct pppol2tp_session),
					      tunnel, session_id,
					      peer_session_id, &cfg);
		if (session == NULL) {
			error = -ENOMEM;
			goto end;
		}
	} else {
		ps = l2tp_session_priv(session);
		error = -EEXIST;
		if (ps->sock != NULL)
			goto end;

		/* consistency checks */
		if (ps->tunnel_sock != tunnel->sock)
			goto end;
	}

	/* Associate session with its PPPoL2TP socket */
	ps = l2tp_session_priv(session);
	ps->owner	     = current->pid;
	ps->sock	     = sk;
	ps->tunnel_sock = tunnel->sock;

	session->recv_skb	= pppol2tp_recv;
	session->session_close	= pppol2tp_session_close;
#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
	session->show		= pppol2tp_show;
#endif

	/* We need to know each time a skb is dropped from the reorder
	 * queue.
	 */
	session->ref = pppol2tp_session_sock_hold;
	session->deref = pppol2tp_session_sock_put;

	/* If PMTU discovery was enabled, use the MTU that was discovered */
	dst = sk_dst_get(sk);
	if (dst != NULL) {
		u32 pmtu = dst_mtu(__sk_dst_get(sk));
		if (pmtu != 0)
			session->mtu = session->mru = pmtu -
				PPPOL2TP_HEADER_OVERHEAD;
		dst_release(dst);
	}

	/* Special case: if source & dest session_id == 0x0000, this
	 * socket is being created to manage the tunnel. Just set up
	 * the internal context for use by ioctl() and sockopt()
	 * handlers.
	 */
	if ((session->session_id == 0) &&
	    (session->peer_session_id == 0)) {
		error = 0;
		goto out_no_ppp;
	}

	/* The only header we need to worry about is the L2TP
	 * header. This size is different depending on whether
	 * sequence numbers are enabled for the data channel.
	 */
	po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;

	po->chan.private = sk;
	po->chan.ops	 = &pppol2tp_chan_ops;
	po->chan.mtu	 = session->mtu;

	error = ppp_register_net_channel(sock_net(sk), &po->chan);
	if (error)
		goto end;

out_no_ppp:
	/* This is how we get the session context from the socket. */
	sk->sk_user_data = session;
	sk->sk_state = PPPOX_CONNECTED;
	PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	       "%s: created\n", session->name);

end:
	release_sock(sk);

	return error;
}
808
809#ifdef CONFIG_L2TP_V3
810
/* Called when creating sessions via the netlink interface.
 *
 * Looks up the parent tunnel, fills in default MTU/MRU values in @cfg if
 * the caller left them at zero, then allocates an l2tp_session carrying a
 * pppol2tp_session private area.  No PPPoX socket exists at this point:
 * only ps->tunnel_sock is recorded (ps->sock is attached later, by
 * connect() — see pppol2tp_connect).
 *
 * Returns 0 on success, -ENOENT if the tunnel is missing or has no
 * socket, -EEXIST if the session id is already in use in this tunnel,
 * -ENOMEM on allocation failure.
 */
static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
	int error;
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;
	struct pppol2tp_session *ps;

	tunnel = l2tp_tunnel_find(net, tunnel_id);

	/* Error if we can't find the tunnel */
	error = -ENOENT;
	if (tunnel == NULL)
		goto out;

	/* Error if tunnel socket is not prepped */
	if (tunnel->sock == NULL)
		goto out;

	/* Check that this session doesn't already exist */
	error = -EEXIST;
	session = l2tp_session_find(net, tunnel, session_id);
	if (session != NULL)
		goto out;

	/* Default MTU values: 1500 less the L2TP/PPP encapsulation
	 * overhead; MRU defaults to the MTU. */
	if (cfg->mtu == 0)
		cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
	if (cfg->mru == 0)
		cfg->mru = cfg->mtu;

	/* Allocate and initialize a new session context. */
	error = -ENOMEM;
	session = l2tp_session_create(sizeof(struct pppol2tp_session),
				      tunnel, session_id,
				      peer_session_id, cfg);
	if (session == NULL)
		goto out;

	ps = l2tp_session_priv(session);
	ps->tunnel_sock = tunnel->sock;

	PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	       "%s: created\n", session->name);

	error = 0;

out:
	return error;
}
862
863/* Called when deleting sessions via the netlink interface.
864 */
865static int pppol2tp_session_delete(struct l2tp_session *session)
866{
867 struct pppol2tp_session *ps = l2tp_session_priv(session);
868
869 if (ps->sock == NULL)
870 l2tp_session_dec_refcount(session);
871
872 return 0;
873}
874
875#endif /* CONFIG_L2TP_V3 */
876
877/* getname() support.
878 */
879static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
880 int *usockaddr_len, int peer)
881{
882 int len = 0;
883 int error = 0;
884 struct l2tp_session *session;
885 struct l2tp_tunnel *tunnel;
886 struct sock *sk = sock->sk;
887 struct inet_sock *inet;
888 struct pppol2tp_session *pls;
889
890 error = -ENOTCONN;
891 if (sk == NULL)
892 goto end;
893 if (sk->sk_state != PPPOX_CONNECTED)
894 goto end;
895
896 error = -EBADF;
897 session = pppol2tp_sock_to_session(sk);
898 if (session == NULL)
899 goto end;
900
901 pls = l2tp_session_priv(session);
902 tunnel = l2tp_sock_to_tunnel(pls->tunnel_sock);
903 if (tunnel == NULL) {
904 error = -EBADF;
905 goto end_put_sess;
906 }
907
908 inet = inet_sk(sk);
909 if (tunnel->version == 2) {
910 struct sockaddr_pppol2tp sp;
911 len = sizeof(sp);
912 memset(&sp, 0, len);
913 sp.sa_family = AF_PPPOX;
914 sp.sa_protocol = PX_PROTO_OL2TP;
915 sp.pppol2tp.fd = tunnel->fd;
916 sp.pppol2tp.pid = pls->owner;
917 sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
918 sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
919 sp.pppol2tp.s_session = session->session_id;
920 sp.pppol2tp.d_session = session->peer_session_id;
921 sp.pppol2tp.addr.sin_family = AF_INET;
922 sp.pppol2tp.addr.sin_port = inet->inet_dport;
923 sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr;
924 memcpy(uaddr, &sp, len);
925 } else if (tunnel->version == 3) {
926 struct sockaddr_pppol2tpv3 sp;
927 len = sizeof(sp);
928 memset(&sp, 0, len);
929 sp.sa_family = AF_PPPOX;
930 sp.sa_protocol = PX_PROTO_OL2TP;
931 sp.pppol2tp.fd = tunnel->fd;
932 sp.pppol2tp.pid = pls->owner;
933 sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
934 sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
935 sp.pppol2tp.s_session = session->session_id;
936 sp.pppol2tp.d_session = session->peer_session_id;
937 sp.pppol2tp.addr.sin_family = AF_INET;
938 sp.pppol2tp.addr.sin_port = inet->inet_dport;
939 sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr;
940 memcpy(uaddr, &sp, len);
941 }
942
943 *usockaddr_len = len;
944
945 sock_put(pls->tunnel_sock);
946end_put_sess:
947 sock_put(sk);
948 error = 0;
949
950end:
951 return error;
952}
953
954/****************************************************************************
955 * ioctl() handlers.
956 *
957 * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
958 * sockets. However, in order to control kernel tunnel features, we allow
959 * userspace to create a special "tunnel" PPPoX socket which is used for
960 * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow
961 * the user application to issue L2TP setsockopt(), getsockopt() and ioctl()
962 * calls.
963 ****************************************************************************/
964
965static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest,
966 struct l2tp_stats *stats)
967{
968 dest->tx_packets = stats->tx_packets;
969 dest->tx_bytes = stats->tx_bytes;
970 dest->tx_errors = stats->tx_errors;
971 dest->rx_packets = stats->rx_packets;
972 dest->rx_bytes = stats->rx_bytes;
973 dest->rx_seq_discards = stats->rx_seq_discards;
974 dest->rx_oos_packets = stats->rx_oos_packets;
975 dest->rx_errors = stats->rx_errors;
976}
977
978/* Session ioctl helper.
979 */
980static int pppol2tp_session_ioctl(struct l2tp_session *session,
981 unsigned int cmd, unsigned long arg)
982{
983 struct ifreq ifr;
984 int err = 0;
985 struct sock *sk;
986 int val = (int) arg;
987 struct pppol2tp_session *ps = l2tp_session_priv(session);
988 struct l2tp_tunnel *tunnel = session->tunnel;
989 struct pppol2tp_ioc_stats stats;
990
991 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
992 "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
993 session->name, cmd, arg);
994
995 sk = ps->sock;
996 sock_hold(sk);
997
998 switch (cmd) {
999 case SIOCGIFMTU:
1000 err = -ENXIO;
1001 if (!(sk->sk_state & PPPOX_CONNECTED))
1002 break;
1003
1004 err = -EFAULT;
1005 if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
1006 break;
1007 ifr.ifr_mtu = session->mtu;
1008 if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
1009 break;
1010
1011 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1012 "%s: get mtu=%d\n", session->name, session->mtu);
1013 err = 0;
1014 break;
1015
1016 case SIOCSIFMTU:
1017 err = -ENXIO;
1018 if (!(sk->sk_state & PPPOX_CONNECTED))
1019 break;
1020
1021 err = -EFAULT;
1022 if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
1023 break;
1024
1025 session->mtu = ifr.ifr_mtu;
1026
1027 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1028 "%s: set mtu=%d\n", session->name, session->mtu);
1029 err = 0;
1030 break;
1031
1032 case PPPIOCGMRU:
1033 err = -ENXIO;
1034 if (!(sk->sk_state & PPPOX_CONNECTED))
1035 break;
1036
1037 err = -EFAULT;
1038 if (put_user(session->mru, (int __user *) arg))
1039 break;
1040
1041 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1042 "%s: get mru=%d\n", session->name, session->mru);
1043 err = 0;
1044 break;
1045
1046 case PPPIOCSMRU:
1047 err = -ENXIO;
1048 if (!(sk->sk_state & PPPOX_CONNECTED))
1049 break;
1050
1051 err = -EFAULT;
1052 if (get_user(val, (int __user *) arg))
1053 break;
1054
1055 session->mru = val;
1056 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1057 "%s: set mru=%d\n", session->name, session->mru);
1058 err = 0;
1059 break;
1060
1061 case PPPIOCGFLAGS:
1062 err = -EFAULT;
1063 if (put_user(ps->flags, (int __user *) arg))
1064 break;
1065
1066 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1067 "%s: get flags=%d\n", session->name, ps->flags);
1068 err = 0;
1069 break;
1070
1071 case PPPIOCSFLAGS:
1072 err = -EFAULT;
1073 if (get_user(val, (int __user *) arg))
1074 break;
1075 ps->flags = val;
1076 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1077 "%s: set flags=%d\n", session->name, ps->flags);
1078 err = 0;
1079 break;
1080
1081 case PPPIOCGL2TPSTATS:
1082 err = -ENXIO;
1083 if (!(sk->sk_state & PPPOX_CONNECTED))
1084 break;
1085
1086 memset(&stats, 0, sizeof(stats));
1087 stats.tunnel_id = tunnel->tunnel_id;
1088 stats.session_id = session->session_id;
1089 pppol2tp_copy_stats(&stats, &session->stats);
1090 if (copy_to_user((void __user *) arg, &stats,
1091 sizeof(stats)))
1092 break;
1093 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1094 "%s: get L2TP stats\n", session->name);
1095 err = 0;
1096 break;
1097
1098 default:
1099 err = -ENOSYS;
1100 break;
1101 }
1102
1103 sock_put(sk);
1104
1105 return err;
1106}
1107
/* Tunnel ioctl helper.
 *
 * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
 * specifies a session_id, the session ioctl handler is called. This allows an
 * application to retrieve session stats via a tunnel socket.
 *
 * Returns 0 on success, -ENXIO if the tunnel socket is not connected,
 * -EFAULT on userspace copy failure, -EBADR if the requested session id
 * does not exist, -ENOSYS for unknown commands.
 */
static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
				 unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct sock *sk;
	struct pppol2tp_ioc_stats stats;

	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
	       "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n",
	       tunnel->name, cmd, arg);

	/* Pin the tunnel socket for the duration of the command. */
	sk = tunnel->sock;
	sock_hold(sk);

	switch (cmd) {
	case PPPIOCGL2TPSTATS:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		/* Read the request first: a non-zero session_id redirects
		 * the whole command to the session handler. */
		if (copy_from_user(&stats, (void __user *) arg,
				   sizeof(stats))) {
			err = -EFAULT;
			break;
		}
		if (stats.session_id != 0) {
			/* resend to session ioctl handler */
			struct l2tp_session *session =
				l2tp_session_find(sock_net(sk), tunnel, stats.session_id);
			if (session != NULL)
				err = pppol2tp_session_ioctl(session, cmd, arg);
			else
				err = -EBADR;
			break;
		}
#ifdef CONFIG_XFRM
		stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
#endif
		pppol2tp_copy_stats(&stats, &tunnel->stats);
		if (copy_to_user((void __user *) arg, &stats, sizeof(stats))) {
			err = -EFAULT;
			break;
		}
		PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get L2TP stats\n", tunnel->name);
		err = 0;
		break;

	default:
		err = -ENOSYS;
		break;
	}

	sock_put(sk);

	return err;
}
1171
/* Main ioctl() handler.
 * Dispatch to tunnel or session helpers depending on the socket.
 *
 * A PPPoX socket whose session_id and peer_session_id are both zero is
 * the special tunnel-management socket; its ioctls are routed to the
 * tunnel helper, everything else to the session helper.
 *
 * Returns 0 for a NULL sk, -EBADF for a dead socket or unresolvable
 * session/tunnel, -ENOTCONN when unbound, otherwise the helper's result.
 */
static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel;
	struct pppol2tp_session *ps;
	int err;

	if (!sk)
		return 0;

	err = -EBADF;
	if (sock_flag(sk, SOCK_DEAD) != 0)
		goto end;

	err = -ENOTCONN;
	if ((sk->sk_user_data == NULL) ||
	    (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
		goto end;

	/* Get session context from the socket.  NOTE(review): this
	 * presumably takes a reference on sk, dropped at end_put_sess —
	 * confirm against pppol2tp_sock_to_session(). */
	err = -EBADF;
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto end;

	/* Special case: if session's session_id is zero, treat ioctl as a
	 * tunnel ioctl
	 */
	ps = l2tp_session_priv(session);
	if ((session->session_id == 0) &&
	    (session->peer_session_id == 0)) {
		err = -EBADF;
		tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
		if (tunnel == NULL)
			goto end_put_sess;

		err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
		sock_put(ps->tunnel_sock);
		goto end_put_sess;
	}

	err = pppol2tp_session_ioctl(session, cmd, arg);

end_put_sess:
	sock_put(sk);
end:
	return err;
}
1225
1226/*****************************************************************************
1227 * setsockopt() / getsockopt() support.
1228 *
1229 * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
1230 * sockets. In order to control kernel tunnel features, we allow userspace to
1231 * create a special "tunnel" PPPoX socket which is used for control only.
1232 * Tunnel PPPoX sockets have session_id == 0 and simply allow the user
1233 * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
1234 *****************************************************************************/
1235
1236/* Tunnel setsockopt() helper.
1237 */
1238static int pppol2tp_tunnel_setsockopt(struct sock *sk,
1239 struct l2tp_tunnel *tunnel,
1240 int optname, int val)
1241{
1242 int err = 0;
1243
1244 switch (optname) {
1245 case PPPOL2TP_SO_DEBUG:
1246 tunnel->debug = val;
1247 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1248 "%s: set debug=%x\n", tunnel->name, tunnel->debug);
1249 break;
1250
1251 default:
1252 err = -ENOPROTOOPT;
1253 break;
1254 }
1255
1256 return err;
1257}
1258
/* Session setsockopt helper.
 *
 * Handles the PPPOL2TP_SO_* session options.  The boolean options
 * (RECVSEQ/SENDSEQ/LNSMODE) accept only 0 or 1 and are stored as
 * all-ones/zero (presumably because the fields are bitfields or used as
 * truth values — confirm against the l2tp_session definition).
 *
 * Returns 0 on success, -EINVAL for an out-of-range boolean value,
 * -ENOPROTOOPT for an unknown option.
 */
static int pppol2tp_session_setsockopt(struct sock *sk,
				       struct l2tp_session *session,
				       int optname, int val)
{
	int err = 0;
	struct pppol2tp_session *ps = l2tp_session_priv(session);

	switch (optname) {
	case PPPOL2TP_SO_RECVSEQ:
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->recv_seq = val ? -1 : 0;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set recv_seq=%d\n", session->name, session->recv_seq);
		break;

	case PPPOL2TP_SO_SENDSEQ:
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->send_seq = val ? -1 : 0;
		/* Keep the PPP channel's header reservation in sync: with
		 * sequence numbers on, the L2TP data header is larger.
		 * NOTE(review): assumes ps->sock is non-NULL here —
		 * presumably guaranteed by the caller's connected check;
		 * confirm. */
		{
			struct sock *ssk = ps->sock;
			struct pppox_sock *po = pppox_sk(ssk);
			po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
				PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
		}
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set send_seq=%d\n", session->name, session->send_seq);
		break;

	case PPPOL2TP_SO_LNSMODE:
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->lns_mode = val ? -1 : 0;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set lns_mode=%d\n", session->name, session->lns_mode);
		break;

	case PPPOL2TP_SO_DEBUG:
		session->debug = val;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set debug=%x\n", session->name, session->debug);
		break;

	case PPPOL2TP_SO_REORDERTO:
		/* Userspace supplies milliseconds; stored in jiffies. */
		session->reorder_timeout = msecs_to_jiffies(val);
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set reorder_timeout=%d\n", session->name, session->reorder_timeout);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
1324
1325/* Main setsockopt() entry point.
1326 * Does API checks, then calls either the tunnel or session setsockopt
1327 * handler, according to whether the PPPoL2TP socket is a for a regular
1328 * session or the special tunnel type.
1329 */
1330static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
1331 char __user *optval, unsigned int optlen)
1332{
1333 struct sock *sk = sock->sk;
1334 struct l2tp_session *session;
1335 struct l2tp_tunnel *tunnel;
1336 struct pppol2tp_session *ps;
1337 int val;
1338 int err;
1339
1340 if (level != SOL_PPPOL2TP)
1341 return udp_prot.setsockopt(sk, level, optname, optval, optlen);
1342
1343 if (optlen < sizeof(int))
1344 return -EINVAL;
1345
1346 if (get_user(val, (int __user *)optval))
1347 return -EFAULT;
1348
1349 err = -ENOTCONN;
1350 if (sk->sk_user_data == NULL)
1351 goto end;
1352
1353 /* Get session context from the socket */
1354 err = -EBADF;
1355 session = pppol2tp_sock_to_session(sk);
1356 if (session == NULL)
1357 goto end;
1358
1359 /* Special case: if session_id == 0x0000, treat as operation on tunnel
1360 */
1361 ps = l2tp_session_priv(session);
1362 if ((session->session_id == 0) &&
1363 (session->peer_session_id == 0)) {
1364 err = -EBADF;
1365 tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
1366 if (tunnel == NULL)
1367 goto end_put_sess;
1368
1369 err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
1370 sock_put(ps->tunnel_sock);
1371 } else
1372 err = pppol2tp_session_setsockopt(sk, session, optname, val);
1373
1374 err = 0;
1375
1376end_put_sess:
1377 sock_put(sk);
1378end:
1379 return err;
1380}
1381
1382/* Tunnel getsockopt helper. Called with sock locked.
1383 */
1384static int pppol2tp_tunnel_getsockopt(struct sock *sk,
1385 struct l2tp_tunnel *tunnel,
1386 int optname, int *val)
1387{
1388 int err = 0;
1389
1390 switch (optname) {
1391 case PPPOL2TP_SO_DEBUG:
1392 *val = tunnel->debug;
1393 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1394 "%s: get debug=%x\n", tunnel->name, tunnel->debug);
1395 break;
1396
1397 default:
1398 err = -ENOPROTOOPT;
1399 break;
1400 }
1401
1402 return err;
1403}
1404
1405/* Session getsockopt helper. Called with sock locked.
1406 */
1407static int pppol2tp_session_getsockopt(struct sock *sk,
1408 struct l2tp_session *session,
1409 int optname, int *val)
1410{
1411 int err = 0;
1412
1413 switch (optname) {
1414 case PPPOL2TP_SO_RECVSEQ:
1415 *val = session->recv_seq;
1416 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1417 "%s: get recv_seq=%d\n", session->name, *val);
1418 break;
1419
1420 case PPPOL2TP_SO_SENDSEQ:
1421 *val = session->send_seq;
1422 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1423 "%s: get send_seq=%d\n", session->name, *val);
1424 break;
1425
1426 case PPPOL2TP_SO_LNSMODE:
1427 *val = session->lns_mode;
1428 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1429 "%s: get lns_mode=%d\n", session->name, *val);
1430 break;
1431
1432 case PPPOL2TP_SO_DEBUG:
1433 *val = session->debug;
1434 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1435 "%s: get debug=%d\n", session->name, *val);
1436 break;
1437
1438 case PPPOL2TP_SO_REORDERTO:
1439 *val = (int) jiffies_to_msecs(session->reorder_timeout);
1440 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1441 "%s: get reorder_timeout=%d\n", session->name, *val);
1442 break;
1443
1444 default:
1445 err = -ENOPROTOOPT;
1446 }
1447
1448 return err;
1449}
1450
1451/* Main getsockopt() entry point.
1452 * Does API checks, then calls either the tunnel or session getsockopt
1453 * handler, according to whether the PPPoX socket is a for a regular session
1454 * or the special tunnel type.
1455 */
1456static int pppol2tp_getsockopt(struct socket *sock, int level,
1457 int optname, char __user *optval, int __user *optlen)
1458{
1459 struct sock *sk = sock->sk;
1460 struct l2tp_session *session;
1461 struct l2tp_tunnel *tunnel;
1462 int val, len;
1463 int err;
1464 struct pppol2tp_session *ps;
1465
1466 if (level != SOL_PPPOL2TP)
1467 return udp_prot.getsockopt(sk, level, optname, optval, optlen);
1468
1469 if (get_user(len, (int __user *) optlen))
1470 return -EFAULT;
1471
1472 len = min_t(unsigned int, len, sizeof(int));
1473
1474 if (len < 0)
1475 return -EINVAL;
1476
1477 err = -ENOTCONN;
1478 if (sk->sk_user_data == NULL)
1479 goto end;
1480
1481 /* Get the session context */
1482 err = -EBADF;
1483 session = pppol2tp_sock_to_session(sk);
1484 if (session == NULL)
1485 goto end;
1486
1487 /* Special case: if session_id == 0x0000, treat as operation on tunnel */
1488 ps = l2tp_session_priv(session);
1489 if ((session->session_id == 0) &&
1490 (session->peer_session_id == 0)) {
1491 err = -EBADF;
1492 tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
1493 if (tunnel == NULL)
1494 goto end_put_sess;
1495
1496 err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
1497 sock_put(ps->tunnel_sock);
1498 } else
1499 err = pppol2tp_session_getsockopt(sk, session, optname, &val);
1500
1501 err = -EFAULT;
1502 if (put_user(len, (int __user *) optlen))
1503 goto end_put_sess;
1504
1505 if (copy_to_user((void __user *) optval, &val, len))
1506 goto end_put_sess;
1507
1508 err = 0;
1509
1510end_put_sess:
1511 sock_put(sk);
1512end:
1513 return err;
1514}
1515
1516/*****************************************************************************
1517 * /proc filesystem for debug
1518 * Since the original pppol2tp driver provided /proc/net/pppol2tp for
1519 * L2TPv2, we dump only L2TPv2 tunnels and sessions here.
1520 *****************************************************************************/
1521
/* Per-namespace id slot, assigned by register_pernet_device() through the
 * .id member of pppol2tp_net_ops below. */
static unsigned int pppol2tp_net_id;
1523
1524#ifdef CONFIG_PROC_FS
1525
/* Iterator state for the /proc/net/pppol2tp walk.  One of these lives in
 * the seq_file's private data for the lifetime of the open file.
 * NOTE(review): 'p' is presumably required to be the first member so
 * seq_file_net() can recover the namespace — confirm. */
struct pppol2tp_seq_data {
	struct seq_net_private p;
	int tunnel_idx;			/* current tunnel */
	int session_idx;		/* index of session within current tunnel */
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;	/* NULL means get next tunnel */
};
1533
1534static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
1535{
1536 for (;;) {
1537 pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx);
1538 pd->tunnel_idx++;
1539
1540 if (pd->tunnel == NULL)
1541 break;
1542
1543 /* Ignore L2TPv3 tunnels */
1544 if (pd->tunnel->version < 3)
1545 break;
1546 }
1547}
1548
1549static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
1550{
1551 pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
1552 pd->session_idx++;
1553
1554 if (pd->session == NULL) {
1555 pd->session_idx = 0;
1556 pppol2tp_next_tunnel(net, pd);
1557 }
1558}
1559
/* seq_file start callback.
 *
 * Position 0 yields SEQ_START_TOKEN so show() emits the header; every
 * later call advances the iterator held in m->private (a session step if
 * a tunnel is current, otherwise a tunnel step) and returns the iterator
 * itself, or NULL at end of list.
 */
static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
{
	struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
	loff_t pos = *offs;
	struct net *net;

	if (!pos)
		goto out;

	BUG_ON(m->private == NULL);
	pd = m->private;
	net = seq_file_net(m);

	if (pd->tunnel == NULL)
		pppol2tp_next_tunnel(net, pd);
	else
		pppol2tp_next_session(net, pd);

	/* NULL tunnel and session indicates end of list */
	if ((pd->tunnel == NULL) && (pd->session == NULL))
		pd = NULL;

out:
	return pd;
}
1585
1586static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
1587{
1588 (*pos)++;
1589 return NULL;
1590}
1591
/* seq_file stop callback.  No locks or references are held across the
 * walk by this layer, so there is nothing to release. */
static void pppol2tp_seq_stop(struct seq_file *p, void *v)
{
	/* nothing to do */
}
1596
/* Emit one tunnel record: name, whether the tunnel still owns its
 * socket's user_data (Y/N), refcount minus our own, then debug mask and
 * tx/rx counters.
 */
static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
{
	struct l2tp_tunnel *tunnel = v;

	seq_printf(m, "\nTUNNEL '%s', %c %d\n",
		   tunnel->name,
		   (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N',
		   atomic_read(&tunnel->ref_count) - 1);
	seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
		   tunnel->debug,
		   (unsigned long long)tunnel->stats.tx_packets,
		   (unsigned long long)tunnel->stats.tx_bytes,
		   (unsigned long long)tunnel->stats.tx_errors,
		   (unsigned long long)tunnel->stats.rx_packets,
		   (unsigned long long)tunnel->stats.rx_bytes,
		   (unsigned long long)tunnel->stats.rx_errors);
}
1614
/* Emit one session record: ids and addresses, socket state, settings and
 * counters, plus the PPP interface name.
 *
 * NOTE(review): ps->sock is dereferenced without a NULL check below —
 * presumably sessions reached via this walk always have an attached
 * PPPoX socket; confirm, since pppol2tp_session_delete() shows sessions
 * can exist with ps->sock == NULL.
 */
static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
{
	struct l2tp_session *session = v;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct pppol2tp_session *ps = l2tp_session_priv(session);
	struct pppox_sock *po = pppox_sk(ps->sock);
	u32 ip = 0;
	u16 port = 0;

	/* Local address/port of the underlying tunnel socket, if any. */
	if (tunnel->sock) {
		struct inet_sock *inet = inet_sk(tunnel->sock);
		ip = ntohl(inet->inet_saddr);
		port = ntohs(inet->inet_sport);
	}

	seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> "
		   "%04X/%04X %d %c\n",
		   session->name, ip, port,
		   tunnel->tunnel_id,
		   session->session_id,
		   tunnel->peer_tunnel_id,
		   session->peer_session_id,
		   ps->sock->sk_state,
		   (session == ps->sock->sk_user_data) ?
		   'Y' : 'N');
	seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n",
		   session->mtu, session->mru,
		   session->recv_seq ? 'R' : '-',
		   session->send_seq ? 'S' : '-',
		   session->lns_mode ? "LNS" : "LAC",
		   session->debug,
		   jiffies_to_msecs(session->reorder_timeout));
	seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
		   session->nr, session->ns,
		   (unsigned long long)session->stats.tx_packets,
		   (unsigned long long)session->stats.tx_bytes,
		   (unsigned long long)session->stats.tx_errors,
		   (unsigned long long)session->stats.rx_packets,
		   (unsigned long long)session->stats.rx_bytes,
		   (unsigned long long)session->stats.rx_errors);

	if (po)
		seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
}
1659
1660static int pppol2tp_seq_show(struct seq_file *m, void *v)
1661{
1662 struct pppol2tp_seq_data *pd = v;
1663
1664 /* display header on line 1 */
1665 if (v == SEQ_START_TOKEN) {
1666 seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
1667 seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
1668 seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
1669 seq_puts(m, " SESSION name, addr/port src-tid/sid "
1670 "dest-tid/sid state user-data-ok\n");
1671 seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
1672 seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
1673 goto out;
1674 }
1675
1676 /* Show the tunnel or session context.
1677 */
1678 if (pd->session == NULL)
1679 pppol2tp_seq_tunnel_show(m, pd->tunnel);
1680 else
1681 pppol2tp_seq_session_show(m, pd->session);
1682
1683out:
1684 return 0;
1685}
1686
/* seq_file operations for /proc/net/pppol2tp; iteration state lives in
 * the file's private pppol2tp_seq_data. */
static const struct seq_operations pppol2tp_seq_ops = {
	.start		= pppol2tp_seq_start,
	.next		= pppol2tp_seq_next,
	.stop		= pppol2tp_seq_stop,
	.show		= pppol2tp_seq_show,
};
1693
/* Called when our /proc file is opened. We allocate data for use when
 * iterating our tunnel / session contexts and store it in the private
 * data of the seq_file.
 * (seq_open_net() presumably zero-initializes the area, so the walk
 * starts at tunnel_idx 0 with no current tunnel — confirm.)
 */
static int pppol2tp_proc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &pppol2tp_seq_ops,
			    sizeof(struct pppol2tp_seq_data));
}
1703
/* File operations for /proc/net/pppol2tp; read-only seq_file interface,
 * released via seq_release_net to free the per-open iterator. */
static const struct file_operations pppol2tp_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= pppol2tp_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};
1711
1712#endif /* CONFIG_PROC_FS */
1713
1714/*****************************************************************************
1715 * Network namespace
1716 *****************************************************************************/
1717
1718static __net_init int pppol2tp_init_net(struct net *net)
1719{
1720 struct proc_dir_entry *pde;
1721 int err = 0;
1722
1723 pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops);
1724 if (!pde) {
1725 err = -ENOMEM;
1726 goto out;
1727 }
1728
1729out:
1730 return err;
1731}
1732
/* Per-namespace teardown: remove the /proc/net/pppol2tp entry created in
 * pppol2tp_init_net(). */
static __net_exit void pppol2tp_exit_net(struct net *net)
{
	proc_net_remove(net, "pppol2tp");
}
1737
/* Pernet hooks: one /proc entry per network namespace.  .id lets the
 * core assign pppol2tp_net_id for per-namespace private data. */
static struct pernet_operations pppol2tp_net_ops = {
	.init = pppol2tp_init_net,
	.exit = pppol2tp_exit_net,
	.id   = &pppol2tp_net_id,
};
1743
1744/*****************************************************************************
1745 * Init and cleanup
1746 *****************************************************************************/
1747
/* proto_ops for PX_PROTO_OL2TP sockets.  Unsupported operations are
 * wired to the sock_no_* stubs; ioctl goes through pppox_ioctl, which
 * dispatches to pppol2tp_ioctl via pppol2tp_proto below. */
static const struct proto_ops pppol2tp_ops = {
	.family		= AF_PPPOX,
	.owner		= THIS_MODULE,
	.release	= pppol2tp_release,
	.bind		= sock_no_bind,
	.connect	= pppol2tp_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= pppol2tp_getname,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= pppol2tp_setsockopt,
	.getsockopt	= pppol2tp_getsockopt,
	.sendmsg	= pppol2tp_sendmsg,
	.recvmsg	= pppol2tp_recvmsg,
	.mmap		= sock_no_mmap,
	.ioctl		= pppox_ioctl,
};
1767
/* PPPoX protocol registration: socket creation and ioctl entry points
 * for PX_PROTO_OL2TP (registered in pppol2tp_init). */
static struct pppox_proto pppol2tp_proto = {
	.create		= pppol2tp_create,
	.ioctl		= pppol2tp_ioctl
};
1772
1773#ifdef CONFIG_L2TP_V3
1774
/* Netlink pseudowire ops for L2TP_PWTYPE_PPP: lets the generic l2tp
 * netlink interface create/delete PPP sessions (L2TPv3 builds only). */
static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = {
	.session_create	= pppol2tp_session_create,
	.session_delete	= pppol2tp_session_delete,
};
1779
1780#endif /* CONFIG_L2TP_V3 */
1781
/* Module init.
 *
 * Registration order matters and the error unwind below is its exact
 * reverse: pernet device, then the socket proto, then the PPPoX protocol
 * number, and finally (L2TPv3 only) the netlink pseudowire ops.
 *
 * Returns 0 on success or the first failing registration's error, with
 * everything registered so far undone.
 */
static int __init pppol2tp_init(void)
{
	int err;

	err = register_pernet_device(&pppol2tp_net_ops);
	if (err)
		goto out;

	err = proto_register(&pppol2tp_sk_proto, 0);
	if (err)
		goto out_unregister_pppol2tp_pernet;

	err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto);
	if (err)
		goto out_unregister_pppol2tp_proto;

#ifdef CONFIG_L2TP_V3
	err = l2tp_nl_register_ops(L2TP_PWTYPE_PPP, &pppol2tp_nl_cmd_ops);
	if (err)
		goto out_unregister_pppox;
#endif

	printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
	       PPPOL2TP_DRV_VERSION);

out:
	return err;

#ifdef CONFIG_L2TP_V3
out_unregister_pppox:
	unregister_pppox_proto(PX_PROTO_OL2TP);
#endif
out_unregister_pppol2tp_proto:
	proto_unregister(&pppol2tp_sk_proto);
out_unregister_pppol2tp_pernet:
	unregister_pernet_device(&pppol2tp_net_ops);
	goto out;
}
1820
/* Module exit: tear down in the reverse of pppol2tp_init()'s
 * registration order. */
static void __exit pppol2tp_exit(void)
{
#ifdef CONFIG_L2TP_V3
	l2tp_nl_unregister_ops(L2TP_PWTYPE_PPP);
#endif
	unregister_pppox_proto(PX_PROTO_OL2TP);
	proto_unregister(&pppol2tp_sk_proto);
	unregister_pernet_device(&pppol2tp_net_ops);
}
1830
1831module_init(pppol2tp_init);
1832module_exit(pppol2tp_exit);
1833
1834MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
1835MODULE_DESCRIPTION("PPP over L2TP over UDP");
1836MODULE_LICENSE("GPL");
1837MODULE_VERSION(PPPOL2TP_DRV_VERSION);
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index bda96d18fd98..d5d8d555c410 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -29,6 +29,7 @@
29#include <linux/inet.h> 29#include <linux/inet.h>
30#include <linux/if_arp.h> 30#include <linux/if_arp.h>
31#include <linux/skbuff.h> 31#include <linux/skbuff.h>
32#include <linux/slab.h>
32#include <net/sock.h> 33#include <net/sock.h>
33#include <asm/uaccess.h> 34#include <asm/uaccess.h>
34#include <asm/system.h> 35#include <asm/system.h>
diff --git a/net/lapb/lapb_in.c b/net/lapb/lapb_in.c
index 6762e7c751eb..21904a002449 100644
--- a/net/lapb/lapb_in.c
+++ b/net/lapb/lapb_in.c
@@ -27,6 +27,7 @@
27#include <linux/inet.h> 27#include <linux/inet.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/skbuff.h> 29#include <linux/skbuff.h>
30#include <linux/slab.h>
30#include <net/sock.h> 31#include <net/sock.h>
31#include <asm/uaccess.h> 32#include <asm/uaccess.h>
32#include <asm/system.h> 33#include <asm/system.h>
diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c
index 339cc5f2684f..c75a79540f9f 100644
--- a/net/lapb/lapb_out.c
+++ b/net/lapb/lapb_out.c
@@ -25,6 +25,7 @@
25#include <linux/net.h> 25#include <linux/net.h>
26#include <linux/inet.h> 26#include <linux/inet.h>
27#include <linux/skbuff.h> 27#include <linux/skbuff.h>
28#include <linux/slab.h>
28#include <net/sock.h> 29#include <net/sock.h>
29#include <asm/uaccess.h> 30#include <asm/uaccess.h>
30#include <asm/system.h> 31#include <asm/system.h>
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c
index b827f47ac133..43a2a7fb327b 100644
--- a/net/lapb/lapb_subr.c
+++ b/net/lapb/lapb_subr.c
@@ -24,6 +24,7 @@
24#include <linux/net.h> 24#include <linux/net.h>
25#include <linux/inet.h> 25#include <linux/inet.h>
26#include <linux/skbuff.h> 26#include <linux/skbuff.h>
27#include <linux/slab.h>
27#include <net/sock.h> 28#include <net/sock.h>
28#include <asm/uaccess.h> 29#include <asm/uaccess.h>
29#include <asm/system.h> 30#include <asm/system.h>
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index e35d907fba2c..2db6a9f75913 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -25,6 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/rtnetlink.h> 26#include <linux/rtnetlink.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/slab.h>
28#include <net/llc.h> 29#include <net/llc.h>
29#include <net/llc_sap.h> 30#include <net/llc_sap.h>
30#include <net/llc_pdu.h> 31#include <net/llc_pdu.h>
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index 86d6985b9d49..ea225bd2672c 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -18,6 +18,7 @@
18 * See the GNU General Public License for more details. 18 * See the GNU General Public License for more details.
19 */ 19 */
20#include <linux/netdevice.h> 20#include <linux/netdevice.h>
21#include <linux/slab.h>
21#include <net/llc_conn.h> 22#include <net/llc_conn.h>
22#include <net/llc_sap.h> 23#include <net/llc_sap.h>
23#include <net/sock.h> 24#include <net/sock.h>
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index a12144da7974..ba137a6a224d 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/slab.h>
16#include <net/llc_sap.h> 17#include <net/llc_sap.h>
17#include <net/llc_conn.h> 18#include <net/llc_conn.h>
18#include <net/sock.h> 19#include <net/sock.h>
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index 78167e81dfeb..2bb0ddff8c0f 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -144,12 +144,6 @@ static struct packet_type llc_tr_packet_type __read_mostly = {
144 144
145static int __init llc_init(void) 145static int __init llc_init(void)
146{ 146{
147 struct net_device *dev;
148
149 dev = first_net_device(&init_net);
150 if (dev != NULL)
151 dev = next_net_device(dev);
152
153 dev_add_pack(&llc_packet_type); 147 dev_add_pack(&llc_packet_type);
154 dev_add_pack(&llc_tr_packet_type); 148 dev_add_pack(&llc_tr_packet_type);
155 return 0; 149 return 0;
diff --git a/net/llc/llc_if.c b/net/llc/llc_if.c
index a89917130a7b..25c31c0a3fdb 100644
--- a/net/llc/llc_if.c
+++ b/net/llc/llc_if.c
@@ -11,6 +11,7 @@
11 * 11 *
12 * See the GNU General Public License for more details. 12 * See the GNU General Public License for more details.
13 */ 13 */
14#include <linux/gfp.h>
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
16#include <linux/netdevice.h> 17#include <linux/netdevice.h>
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index 57ad974e4d94..f99687439139 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -12,6 +12,7 @@
12 * See the GNU General Public License for more details. 12 * See the GNU General Public License for more details.
13 */ 13 */
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/slab.h>
15#include <net/net_namespace.h> 16#include <net/net_namespace.h>
16#include <net/llc.h> 17#include <net/llc.h>
17#include <net/llc_pdu.h> 18#include <net/llc_pdu.h>
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index ad6e6e1cf22f..a432f0ec051c 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -23,6 +23,7 @@
23#include <net/sock.h> 23#include <net/sock.h>
24#include <net/tcp_states.h> 24#include <net/tcp_states.h>
25#include <linux/llc.h> 25#include <linux/llc.h>
26#include <linux/slab.h>
26 27
27static int llc_mac_header_len(unsigned short devtype) 28static int llc_mac_header_len(unsigned short devtype)
28{ 29{
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index 83da13339490..e4dae0244d76 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -13,6 +13,7 @@
13 */ 13 */
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/slab.h>
16#include <net/llc.h> 17#include <net/llc.h>
17#include <net/llc_sap.h> 18#include <net/llc_sap.h>
18#include <net/llc_conn.h> 19#include <net/llc_conn.h>
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index a952b7f8c648..8a91f6c0bb18 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -15,8 +15,12 @@ comment "CFG80211 needs to be enabled for MAC80211"
15 15
16if MAC80211 != n 16if MAC80211 != n
17 17
18config MAC80211_HAS_RC
19 def_bool n
20
18config MAC80211_RC_PID 21config MAC80211_RC_PID
19 bool "PID controller based rate control algorithm" if EMBEDDED 22 bool "PID controller based rate control algorithm" if EMBEDDED
23 select MAC80211_HAS_RC
20 ---help--- 24 ---help---
21 This option enables a TX rate control algorithm for 25 This option enables a TX rate control algorithm for
22 mac80211 that uses a PID controller to select the TX 26 mac80211 that uses a PID controller to select the TX
@@ -24,12 +28,14 @@ config MAC80211_RC_PID
24 28
25config MAC80211_RC_MINSTREL 29config MAC80211_RC_MINSTREL
26 bool "Minstrel" if EMBEDDED 30 bool "Minstrel" if EMBEDDED
31 select MAC80211_HAS_RC
27 default y 32 default y
28 ---help--- 33 ---help---
29 This option enables the 'minstrel' TX rate control algorithm 34 This option enables the 'minstrel' TX rate control algorithm
30 35
31choice 36choice
32 prompt "Default rate control algorithm" 37 prompt "Default rate control algorithm"
38 depends on MAC80211_HAS_RC
33 default MAC80211_RC_DEFAULT_MINSTREL 39 default MAC80211_RC_DEFAULT_MINSTREL
34 ---help--- 40 ---help---
35 This option selects the default rate control algorithm 41 This option selects the default rate control algorithm
@@ -62,6 +68,9 @@ config MAC80211_RC_DEFAULT
62 68
63endif 69endif
64 70
71comment "Some wireless drivers require a rate control algorithm"
72 depends on MAC80211_HAS_RC=n
73
65config MAC80211_MESH 74config MAC80211_MESH
66 bool "Enable mac80211 mesh networking (pre-802.11s) support" 75 bool "Enable mac80211 mesh networking (pre-802.11s) support"
67 depends on MAC80211 && EXPERIMENTAL 76 depends on MAC80211 && EXPERIMENTAL
@@ -212,8 +221,8 @@ config MAC80211_DRIVER_API_TRACER
212 depends on EVENT_TRACING 221 depends on EVENT_TRACING
213 help 222 help
214 Say Y here to make mac80211 register with the ftrace 223 Say Y here to make mac80211 register with the ftrace
215 framework for the driver API -- you can see which 224 framework for the driver API -- you can then see which
216 driver methods it is calling then by looking at the 225 driver methods it is calling and which API functions
217 trace. 226 drivers are calling by looking at the trace.
218 227
219 If unsure, say N. 228 If unsure, say Y.
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index a978e666ed6f..9598fdb4ad01 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/ieee80211.h> 16#include <linux/ieee80211.h>
17#include <linux/slab.h>
17#include <net/mac80211.h> 18#include <net/mac80211.h>
18#include "ieee80211_i.h" 19#include "ieee80211_i.h"
19#include "driver-ops.h" 20#include "driver-ops.h"
@@ -22,19 +23,20 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
22 u16 initiator, u16 reason) 23 u16 initiator, u16 reason)
23{ 24{
24 struct ieee80211_local *local = sta->local; 25 struct ieee80211_local *local = sta->local;
26 struct tid_ampdu_rx *tid_rx;
25 int i; 27 int i;
26 28
27 /* check if TID is in operational state */
28 spin_lock_bh(&sta->lock); 29 spin_lock_bh(&sta->lock);
29 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL) { 30
31 /* check if TID is in operational state */
32 if (!sta->ampdu_mlme.tid_active_rx[tid]) {
30 spin_unlock_bh(&sta->lock); 33 spin_unlock_bh(&sta->lock);
31 return; 34 return;
32 } 35 }
33 36
34 sta->ampdu_mlme.tid_state_rx[tid] = 37 sta->ampdu_mlme.tid_active_rx[tid] = false;
35 HT_AGG_STATE_REQ_STOP_BA_MSK | 38
36 (initiator << HT_AGG_STATE_INITIATOR_SHIFT); 39 tid_rx = sta->ampdu_mlme.tid_rx[tid];
37 spin_unlock_bh(&sta->lock);
38 40
39#ifdef CONFIG_MAC80211_HT_DEBUG 41#ifdef CONFIG_MAC80211_HT_DEBUG
40 printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n", 42 printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
@@ -46,61 +48,35 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
46 printk(KERN_DEBUG "HW problem - can not stop rx " 48 printk(KERN_DEBUG "HW problem - can not stop rx "
47 "aggregation for tid %d\n", tid); 49 "aggregation for tid %d\n", tid);
48 50
49 /* shutdown timer has not expired */
50 if (initiator != WLAN_BACK_TIMER)
51 del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
52
53 /* check if this is a self generated aggregation halt */ 51 /* check if this is a self generated aggregation halt */
54 if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER) 52 if (initiator == WLAN_BACK_RECIPIENT)
55 ieee80211_send_delba(sta->sdata, sta->sta.addr, 53 ieee80211_send_delba(sta->sdata, sta->sta.addr,
56 tid, 0, reason); 54 tid, 0, reason);
57 55
58 /* free the reordering buffer */ 56 /* free the reordering buffer */
59 for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) { 57 for (i = 0; i < tid_rx->buf_size; i++) {
60 if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) { 58 if (tid_rx->reorder_buf[i]) {
61 /* release the reordered frames */ 59 /* release the reordered frames */
62 dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]); 60 dev_kfree_skb(tid_rx->reorder_buf[i]);
63 sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--; 61 tid_rx->stored_mpdu_num--;
64 sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL; 62 tid_rx->reorder_buf[i] = NULL;
65 } 63 }
66 } 64 }
67 65
68 spin_lock_bh(&sta->lock);
69 /* free resources */ 66 /* free resources */
70 kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf); 67 kfree(tid_rx->reorder_buf);
71 kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_time); 68 kfree(tid_rx->reorder_time);
69 sta->ampdu_mlme.tid_rx[tid] = NULL;
72 70
73 if (!sta->ampdu_mlme.tid_rx[tid]->shutdown) {
74 kfree(sta->ampdu_mlme.tid_rx[tid]);
75 sta->ampdu_mlme.tid_rx[tid] = NULL;
76 }
77
78 sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE;
79 spin_unlock_bh(&sta->lock); 71 spin_unlock_bh(&sta->lock);
80}
81
82void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid,
83 u16 initiator, u16 reason)
84{
85 struct sta_info *sta;
86
87 rcu_read_lock();
88
89 sta = sta_info_get(sdata, ra);
90 if (!sta) {
91 rcu_read_unlock();
92 return;
93 }
94
95 __ieee80211_stop_rx_ba_session(sta, tid, initiator, reason);
96 72
97 rcu_read_unlock(); 73 del_timer_sync(&tid_rx->session_timer);
74 kfree(tid_rx);
98} 75}
99 76
100/* 77/*
101 * After accepting the AddBA Request we activated a timer, 78 * After accepting the AddBA Request we activated a timer,
102 * resetting it after each frame that arrives from the originator. 79 * resetting it after each frame that arrives from the originator.
103 * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed.
104 */ 80 */
105static void sta_rx_agg_session_timer_expired(unsigned long data) 81static void sta_rx_agg_session_timer_expired(unsigned long data)
106{ 82{
@@ -116,9 +92,8 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
116#ifdef CONFIG_MAC80211_HT_DEBUG 92#ifdef CONFIG_MAC80211_HT_DEBUG
117 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); 93 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
118#endif 94#endif
119 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr, 95 __ieee80211_stop_rx_ba_session(sta, *ptid, WLAN_BACK_RECIPIENT,
120 (u16)*ptid, WLAN_BACK_TIMER, 96 WLAN_REASON_QSTA_TIMEOUT);
121 WLAN_REASON_QSTA_TIMEOUT);
122} 97}
123 98
124static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, 99static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid,
@@ -193,7 +168,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
193 168
194 status = WLAN_STATUS_REQUEST_DECLINED; 169 status = WLAN_STATUS_REQUEST_DECLINED;
195 170
196 if (test_sta_flags(sta, WLAN_STA_SUSPEND)) { 171 if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
197#ifdef CONFIG_MAC80211_HT_DEBUG 172#ifdef CONFIG_MAC80211_HT_DEBUG
198 printk(KERN_DEBUG "Suspend in progress. " 173 printk(KERN_DEBUG "Suspend in progress. "
199 "Denying ADDBA request\n"); 174 "Denying ADDBA request\n");
@@ -231,7 +206,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
231 /* examine state machine */ 206 /* examine state machine */
232 spin_lock_bh(&sta->lock); 207 spin_lock_bh(&sta->lock);
233 208
234 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) { 209 if (sta->ampdu_mlme.tid_active_rx[tid]) {
235#ifdef CONFIG_MAC80211_HT_DEBUG 210#ifdef CONFIG_MAC80211_HT_DEBUG
236 if (net_ratelimit()) 211 if (net_ratelimit())
237 printk(KERN_DEBUG "unexpected AddBA Req from " 212 printk(KERN_DEBUG "unexpected AddBA Req from "
@@ -293,7 +268,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
293 } 268 }
294 269
295 /* change state and send addba resp */ 270 /* change state and send addba resp */
296 sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL; 271 sta->ampdu_mlme.tid_active_rx[tid] = true;
297 tid_agg_rx->dialog_token = dialog_token; 272 tid_agg_rx->dialog_token = dialog_token;
298 tid_agg_rx->ssn = start_seq_num; 273 tid_agg_rx->ssn = start_seq_num;
299 tid_agg_rx->head_seq_num = start_seq_num; 274 tid_agg_rx->head_seq_num = start_seq_num;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 5538e1b4a697..608063f11797 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/ieee80211.h> 16#include <linux/ieee80211.h>
17#include <linux/slab.h>
17#include <net/mac80211.h> 18#include <net/mac80211.h>
18#include "ieee80211_i.h" 19#include "ieee80211_i.h"
19#include "driver-ops.h" 20#include "driver-ops.h"
@@ -214,6 +215,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
214 int ret = 0; 215 int ret = 0;
215 u16 start_seq_num; 216 u16 start_seq_num;
216 217
218 trace_api_start_tx_ba_session(pubsta, tid);
219
217 if (WARN_ON(!local->ops->ampdu_action)) 220 if (WARN_ON(!local->ops->ampdu_action))
218 return -EINVAL; 221 return -EINVAL;
219 222
@@ -245,7 +248,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
245 return -EINVAL; 248 return -EINVAL;
246 } 249 }
247 250
248 if (test_sta_flags(sta, WLAN_STA_SUSPEND)) { 251 if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
249#ifdef CONFIG_MAC80211_HT_DEBUG 252#ifdef CONFIG_MAC80211_HT_DEBUG
250 printk(KERN_DEBUG "Suspend in progress. " 253 printk(KERN_DEBUG "Suspend in progress. "
251 "Denying BA session request\n"); 254 "Denying BA session request\n");
@@ -414,7 +417,7 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
414 struct sta_info *sta, u16 tid) 417 struct sta_info *sta, u16 tid)
415{ 418{
416#ifdef CONFIG_MAC80211_HT_DEBUG 419#ifdef CONFIG_MAC80211_HT_DEBUG
417 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid); 420 printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
418#endif 421#endif
419 422
420 spin_lock(&local->ampdu_lock); 423 spin_lock(&local->ampdu_lock);
@@ -440,6 +443,8 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
440 struct sta_info *sta; 443 struct sta_info *sta;
441 u8 *state; 444 u8 *state;
442 445
446 trace_api_start_tx_ba_cb(sdata, ra, tid);
447
443 if (tid >= STA_TID_NUM) { 448 if (tid >= STA_TID_NUM) {
444#ifdef CONFIG_MAC80211_HT_DEBUG 449#ifdef CONFIG_MAC80211_HT_DEBUG
445 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", 450 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
@@ -541,6 +546,8 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
541 struct ieee80211_sub_if_data *sdata = sta->sdata; 546 struct ieee80211_sub_if_data *sdata = sta->sdata;
542 struct ieee80211_local *local = sdata->local; 547 struct ieee80211_local *local = sdata->local;
543 548
549 trace_api_stop_tx_ba_session(pubsta, tid, initiator);
550
544 if (!local->ops->ampdu_action) 551 if (!local->ops->ampdu_action)
545 return -EINVAL; 552 return -EINVAL;
546 553
@@ -558,6 +565,8 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
558 struct sta_info *sta; 565 struct sta_info *sta;
559 u8 *state; 566 u8 *state;
560 567
568 trace_api_stop_tx_ba_cb(sdata, ra, tid);
569
561 if (tid >= STA_TID_NUM) { 570 if (tid >= STA_TID_NUM) {
562#ifdef CONFIG_MAC80211_HT_DEBUG 571#ifdef CONFIG_MAC80211_HT_DEBUG
563 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", 572 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
@@ -674,7 +683,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
674 del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); 683 del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
675 684
676#ifdef CONFIG_MAC80211_HT_DEBUG 685#ifdef CONFIG_MAC80211_HT_DEBUG
677 printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid); 686 printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
678#endif /* CONFIG_MAC80211_HT_DEBUG */ 687#endif /* CONFIG_MAC80211_HT_DEBUG */
679 688
680 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) 689 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index b7116ef84a3b..7dd7cda75cfa 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -9,6 +9,7 @@
9#include <linux/ieee80211.h> 9#include <linux/ieee80211.h>
10#include <linux/nl80211.h> 10#include <linux/nl80211.h>
11#include <linux/rtnetlink.h> 11#include <linux/rtnetlink.h>
12#include <linux/slab.h>
12#include <net/net_namespace.h> 13#include <net/net_namespace.h>
13#include <linux/rcupdate.h> 14#include <linux/rcupdate.h>
14#include <net/cfg80211.h> 15#include <net/cfg80211.h>
@@ -1136,6 +1137,10 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
1136 return -EINVAL; 1137 return -EINVAL;
1137 } 1138 }
1138 1139
1140 /* enable WMM or activate new settings */
1141 local->hw.conf.flags |= IEEE80211_CONF_QOS;
1142 drv_config(local, IEEE80211_CONF_CHANGE_QOS);
1143
1139 return 0; 1144 return 0;
1140} 1145}
1141 1146
@@ -1402,6 +1407,35 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
1402 return 0; 1407 return 0;
1403} 1408}
1404 1409
1410static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
1411 struct net_device *dev,
1412 s32 rssi_thold, u32 rssi_hyst)
1413{
1414 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1415 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1416 struct ieee80211_vif *vif = &sdata->vif;
1417 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
1418
1419 if (rssi_thold == bss_conf->cqm_rssi_thold &&
1420 rssi_hyst == bss_conf->cqm_rssi_hyst)
1421 return 0;
1422
1423 bss_conf->cqm_rssi_thold = rssi_thold;
1424 bss_conf->cqm_rssi_hyst = rssi_hyst;
1425
1426 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) {
1427 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1428 return -EOPNOTSUPP;
1429 return 0;
1430 }
1431
1432 /* tell the driver upon association, unless already associated */
1433 if (sdata->u.mgd.associated)
1434 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_CQM);
1435
1436 return 0;
1437}
1438
1405static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, 1439static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
1406 struct net_device *dev, 1440 struct net_device *dev,
1407 const u8 *addr, 1441 const u8 *addr,
@@ -1506,4 +1540,5 @@ struct cfg80211_ops mac80211_config_ops = {
1506 .remain_on_channel = ieee80211_remain_on_channel, 1540 .remain_on_channel = ieee80211_remain_on_channel,
1507 .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel, 1541 .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel,
1508 .action = ieee80211_action, 1542 .action = ieee80211_action,
1543 .set_cqm_rssi_config = ieee80211_set_cqm_rssi_config,
1509}; 1544};
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index d12e743cb4e1..97c9e46e859e 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/kobject.h> 11#include <linux/kobject.h>
12#include <linux/slab.h>
12#include "ieee80211_i.h" 13#include "ieee80211_i.h"
13#include "key.h" 14#include "key.h"
14#include "debugfs.h" 15#include "debugfs.h"
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index b4ddb2f83914..20b2998fa0ed 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -13,6 +13,7 @@
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/rtnetlink.h> 15#include <linux/rtnetlink.h>
16#include <linux/slab.h>
16#include <linux/notifier.h> 17#include <linux/notifier.h>
17#include <net/mac80211.h> 18#include <net/mac80211.h>
18#include <net/cfg80211.h> 19#include <net/cfg80211.h>
@@ -99,6 +100,14 @@ static ssize_t ieee80211_if_fmt_##name( \
99 return scnprintf(buf, buflen, "%pM\n", sdata->field); \ 100 return scnprintf(buf, buflen, "%pM\n", sdata->field); \
100} 101}
101 102
103#define IEEE80211_IF_FMT_DEC_DIV_16(name, field) \
104static ssize_t ieee80211_if_fmt_##name( \
105 const struct ieee80211_sub_if_data *sdata, \
106 char *buf, int buflen) \
107{ \
108 return scnprintf(buf, buflen, "%d\n", sdata->field / 16); \
109}
110
102#define __IEEE80211_IF_FILE(name, _write) \ 111#define __IEEE80211_IF_FILE(name, _write) \
103static ssize_t ieee80211_if_read_##name(struct file *file, \ 112static ssize_t ieee80211_if_read_##name(struct file *file, \
104 char __user *userbuf, \ 113 char __user *userbuf, \
@@ -139,6 +148,8 @@ IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
139/* STA attributes */ 148/* STA attributes */
140IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC); 149IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
141IEEE80211_IF_FILE(aid, u.mgd.aid, DEC); 150IEEE80211_IF_FILE(aid, u.mgd.aid, DEC);
151IEEE80211_IF_FILE(last_beacon, u.mgd.last_beacon_signal, DEC);
152IEEE80211_IF_FILE(ave_beacon, u.mgd.ave_beacon_signal, DEC_DIV_16);
142 153
143static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata, 154static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
144 enum ieee80211_smps_mode smps_mode) 155 enum ieee80211_smps_mode smps_mode)
@@ -275,6 +286,8 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
275 286
276 DEBUGFS_ADD(bssid); 287 DEBUGFS_ADD(bssid);
277 DEBUGFS_ADD(aid); 288 DEBUGFS_ADD(aid);
289 DEBUGFS_ADD(last_beacon);
290 DEBUGFS_ADD(ave_beacon);
278 DEBUGFS_ADD_MODE(smps, 0600); 291 DEBUGFS_ADD_MODE(smps, 0600);
279} 292}
280 293
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index d92800bb2d2f..6bc9b07c3eda 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -57,7 +57,6 @@ STA_FILE(tx_filtered, tx_filtered_count, LU);
57STA_FILE(tx_retry_failed, tx_retry_failed, LU); 57STA_FILE(tx_retry_failed, tx_retry_failed, LU);
58STA_FILE(tx_retry_count, tx_retry_count, LU); 58STA_FILE(tx_retry_count, tx_retry_count, LU);
59STA_FILE(last_signal, last_signal, D); 59STA_FILE(last_signal, last_signal, D);
60STA_FILE(last_noise, last_noise, D);
61STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU); 60STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU);
62 61
63static ssize_t sta_flags_read(struct file *file, char __user *userbuf, 62static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
@@ -120,7 +119,7 @@ STA_OPS(last_seq_ctrl);
120static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, 119static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
121 size_t count, loff_t *ppos) 120 size_t count, loff_t *ppos)
122{ 121{
123 char buf[64 + STA_TID_NUM * 40], *p = buf; 122 char buf[71 + STA_TID_NUM * 40], *p = buf;
124 int i; 123 int i;
125 struct sta_info *sta = file->private_data; 124 struct sta_info *sta = file->private_data;
126 125
@@ -128,16 +127,16 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
128 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n", 127 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
129 sta->ampdu_mlme.dialog_token_allocator + 1); 128 sta->ampdu_mlme.dialog_token_allocator + 1);
130 p += scnprintf(p, sizeof(buf) + buf - p, 129 p += scnprintf(p, sizeof(buf) + buf - p,
131 "TID\t\tRX\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n"); 130 "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n");
132 for (i = 0; i < STA_TID_NUM; i++) { 131 for (i = 0; i < STA_TID_NUM; i++) {
133 p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i); 132 p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i);
134 p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", 133 p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
135 sta->ampdu_mlme.tid_state_rx[i]); 134 sta->ampdu_mlme.tid_active_rx[i]);
136 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", 135 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
137 sta->ampdu_mlme.tid_state_rx[i] ? 136 sta->ampdu_mlme.tid_active_rx[i] ?
138 sta->ampdu_mlme.tid_rx[i]->dialog_token : 0); 137 sta->ampdu_mlme.tid_rx[i]->dialog_token : 0);
139 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", 138 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
140 sta->ampdu_mlme.tid_state_rx[i] ? 139 sta->ampdu_mlme.tid_active_rx[i] ?
141 sta->ampdu_mlme.tid_rx[i]->ssn : 0); 140 sta->ampdu_mlme.tid_rx[i]->ssn : 0);
142 141
143 p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", 142 p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
@@ -177,7 +176,7 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
177 if (htc->ht_supported) { 176 if (htc->ht_supported) {
178 p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap); 177 p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap);
179 178
180 PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDCP"); 179 PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDPC");
181 PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40"); 180 PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40");
182 PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20"); 181 PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20");
183 182
@@ -289,7 +288,6 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
289 DEBUGFS_ADD(tx_retry_failed); 288 DEBUGFS_ADD(tx_retry_failed);
290 DEBUGFS_ADD(tx_retry_count); 289 DEBUGFS_ADD(tx_retry_count);
291 DEBUGFS_ADD(last_signal); 290 DEBUGFS_ADD(last_signal);
292 DEBUGFS_ADD(last_noise);
293 DEBUGFS_ADD(wep_weak_iv_count); 291 DEBUGFS_ADD(wep_weak_iv_count);
294 DEBUGFS_ADD(ht_capa); 292 DEBUGFS_ADD(ht_capa);
295} 293}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index c3d844093a2f..9179196da264 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -84,16 +84,14 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
84} 84}
85 85
86static inline u64 drv_prepare_multicast(struct ieee80211_local *local, 86static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
87 int mc_count, 87 struct netdev_hw_addr_list *mc_list)
88 struct dev_addr_list *mc_list)
89{ 88{
90 u64 ret = 0; 89 u64 ret = 0;
91 90
92 if (local->ops->prepare_multicast) 91 if (local->ops->prepare_multicast)
93 ret = local->ops->prepare_multicast(&local->hw, mc_count, 92 ret = local->ops->prepare_multicast(&local->hw, mc_list);
94 mc_list);
95 93
96 trace_drv_prepare_multicast(local, mc_count, ret); 94 trace_drv_prepare_multicast(local, mc_list->count, ret);
97 95
98 return ret; 96 return ret;
99} 97}
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 41baf730a5c7..e209cb82ff29 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -32,6 +32,10 @@ static inline void trace_ ## name(proto) {}
32#define VIF_PR_FMT " vif:%s(%d)" 32#define VIF_PR_FMT " vif:%s(%d)"
33#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type 33#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type
34 34
35/*
36 * Tracing for driver callbacks.
37 */
38
35TRACE_EVENT(drv_start, 39TRACE_EVENT(drv_start,
36 TP_PROTO(struct ieee80211_local *local, int ret), 40 TP_PROTO(struct ieee80211_local *local, int ret),
37 41
@@ -766,6 +770,277 @@ TRACE_EVENT(drv_flush,
766 LOCAL_PR_ARG, __entry->drop 770 LOCAL_PR_ARG, __entry->drop
767 ) 771 )
768); 772);
773
774/*
775 * Tracing for API calls that drivers call.
776 */
777
778TRACE_EVENT(api_start_tx_ba_session,
779 TP_PROTO(struct ieee80211_sta *sta, u16 tid),
780
781 TP_ARGS(sta, tid),
782
783 TP_STRUCT__entry(
784 STA_ENTRY
785 __field(u16, tid)
786 ),
787
788 TP_fast_assign(
789 STA_ASSIGN;
790 __entry->tid = tid;
791 ),
792
793 TP_printk(
794 STA_PR_FMT " tid:%d",
795 STA_PR_ARG, __entry->tid
796 )
797);
798
799TRACE_EVENT(api_start_tx_ba_cb,
800 TP_PROTO(struct ieee80211_sub_if_data *sdata, const u8 *ra, u16 tid),
801
802 TP_ARGS(sdata, ra, tid),
803
804 TP_STRUCT__entry(
805 VIF_ENTRY
806 __array(u8, ra, ETH_ALEN)
807 __field(u16, tid)
808 ),
809
810 TP_fast_assign(
811 VIF_ASSIGN;
812 memcpy(__entry->ra, ra, ETH_ALEN);
813 __entry->tid = tid;
814 ),
815
816 TP_printk(
817 VIF_PR_FMT " ra:%pM tid:%d",
818 VIF_PR_ARG, __entry->ra, __entry->tid
819 )
820);
821
822TRACE_EVENT(api_stop_tx_ba_session,
823 TP_PROTO(struct ieee80211_sta *sta, u16 tid, u16 initiator),
824
825 TP_ARGS(sta, tid, initiator),
826
827 TP_STRUCT__entry(
828 STA_ENTRY
829 __field(u16, tid)
830 __field(u16, initiator)
831 ),
832
833 TP_fast_assign(
834 STA_ASSIGN;
835 __entry->tid = tid;
836 __entry->initiator = initiator;
837 ),
838
839 TP_printk(
840 STA_PR_FMT " tid:%d initiator:%d",
841 STA_PR_ARG, __entry->tid, __entry->initiator
842 )
843);
844
845TRACE_EVENT(api_stop_tx_ba_cb,
846 TP_PROTO(struct ieee80211_sub_if_data *sdata, const u8 *ra, u16 tid),
847
848 TP_ARGS(sdata, ra, tid),
849
850 TP_STRUCT__entry(
851 VIF_ENTRY
852 __array(u8, ra, ETH_ALEN)
853 __field(u16, tid)
854 ),
855
856 TP_fast_assign(
857 VIF_ASSIGN;
858 memcpy(__entry->ra, ra, ETH_ALEN);
859 __entry->tid = tid;
860 ),
861
862 TP_printk(
863 VIF_PR_FMT " ra:%pM tid:%d",
864 VIF_PR_ARG, __entry->ra, __entry->tid
865 )
866);
867
868TRACE_EVENT(api_restart_hw,
869 TP_PROTO(struct ieee80211_local *local),
870
871 TP_ARGS(local),
872
873 TP_STRUCT__entry(
874 LOCAL_ENTRY
875 ),
876
877 TP_fast_assign(
878 LOCAL_ASSIGN;
879 ),
880
881 TP_printk(
882 LOCAL_PR_FMT,
883 LOCAL_PR_ARG
884 )
885);
886
887TRACE_EVENT(api_beacon_loss,
888 TP_PROTO(struct ieee80211_sub_if_data *sdata),
889
890 TP_ARGS(sdata),
891
892 TP_STRUCT__entry(
893 VIF_ENTRY
894 ),
895
896 TP_fast_assign(
897 VIF_ASSIGN;
898 ),
899
900 TP_printk(
901 VIF_PR_FMT,
902 VIF_PR_ARG
903 )
904);
905
906TRACE_EVENT(api_connection_loss,
907 TP_PROTO(struct ieee80211_sub_if_data *sdata),
908
909 TP_ARGS(sdata),
910
911 TP_STRUCT__entry(
912 VIF_ENTRY
913 ),
914
915 TP_fast_assign(
916 VIF_ASSIGN;
917 ),
918
919 TP_printk(
920 VIF_PR_FMT,
921 VIF_PR_ARG
922 )
923);
924
925TRACE_EVENT(api_cqm_rssi_notify,
926 TP_PROTO(struct ieee80211_sub_if_data *sdata,
927 enum nl80211_cqm_rssi_threshold_event rssi_event),
928
929 TP_ARGS(sdata, rssi_event),
930
931 TP_STRUCT__entry(
932 VIF_ENTRY
933 __field(u32, rssi_event)
934 ),
935
936 TP_fast_assign(
937 VIF_ASSIGN;
938 __entry->rssi_event = rssi_event;
939 ),
940
941 TP_printk(
942 VIF_PR_FMT " event:%d",
943 VIF_PR_ARG, __entry->rssi_event
944 )
945);
946
947TRACE_EVENT(api_scan_completed,
948 TP_PROTO(struct ieee80211_local *local, bool aborted),
949
950 TP_ARGS(local, aborted),
951
952 TP_STRUCT__entry(
953 LOCAL_ENTRY
954 __field(bool, aborted)
955 ),
956
957 TP_fast_assign(
958 LOCAL_ASSIGN;
959 __entry->aborted = aborted;
960 ),
961
962 TP_printk(
963 LOCAL_PR_FMT " aborted:%d",
964 LOCAL_PR_ARG, __entry->aborted
965 )
966);
967
968TRACE_EVENT(api_sta_block_awake,
969 TP_PROTO(struct ieee80211_local *local,
970 struct ieee80211_sta *sta, bool block),
971
972 TP_ARGS(local, sta, block),
973
974 TP_STRUCT__entry(
975 LOCAL_ENTRY
976 STA_ENTRY
977 __field(bool, block)
978 ),
979
980 TP_fast_assign(
981 LOCAL_ASSIGN;
982 STA_ASSIGN;
983 __entry->block = block;
984 ),
985
986 TP_printk(
987 LOCAL_PR_FMT STA_PR_FMT " block:%d",
988 LOCAL_PR_ARG, STA_PR_FMT, __entry->block
989 )
990);
991
992/*
993 * Tracing for internal functions
994 * (which may also be called in response to driver calls)
995 */
996
997TRACE_EVENT(wake_queue,
998 TP_PROTO(struct ieee80211_local *local, u16 queue,
999 enum queue_stop_reason reason),
1000
1001 TP_ARGS(local, queue, reason),
1002
1003 TP_STRUCT__entry(
1004 LOCAL_ENTRY
1005 __field(u16, queue)
1006 __field(u32, reason)
1007 ),
1008
1009 TP_fast_assign(
1010 LOCAL_ASSIGN;
1011 __entry->queue = queue;
1012 __entry->reason = reason;
1013 ),
1014
1015 TP_printk(
1016 LOCAL_PR_FMT " queue:%d, reason:%d",
1017 LOCAL_PR_ARG, __entry->queue, __entry->reason
1018 )
1019);
1020
1021TRACE_EVENT(stop_queue,
1022 TP_PROTO(struct ieee80211_local *local, u16 queue,
1023 enum queue_stop_reason reason),
1024
1025 TP_ARGS(local, queue, reason),
1026
1027 TP_STRUCT__entry(
1028 LOCAL_ENTRY
1029 __field(u16, queue)
1030 __field(u32, reason)
1031 ),
1032
1033 TP_fast_assign(
1034 LOCAL_ASSIGN;
1035 __entry->queue = queue;
1036 __entry->reason = reason;
1037 ),
1038
1039 TP_printk(
1040 LOCAL_PR_FMT " queue:%d, reason:%d",
1041 LOCAL_PR_ARG, __entry->queue, __entry->reason
1042 )
1043);
769#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ 1044#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
770 1045
771#undef TRACE_INCLUDE_PATH 1046#undef TRACE_INCLUDE_PATH
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index bb677a73b7c9..2ab106a0a491 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -175,8 +175,7 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
175#endif /* CONFIG_MAC80211_HT_DEBUG */ 175#endif /* CONFIG_MAC80211_HT_DEBUG */
176 176
177 if (initiator == WLAN_BACK_INITIATOR) 177 if (initiator == WLAN_BACK_INITIATOR)
178 ieee80211_sta_stop_rx_ba_session(sdata, sta->sta.addr, tid, 178 __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0);
179 WLAN_BACK_INITIATOR, 0);
180 else { /* WLAN_BACK_RECIPIENT */ 179 else { /* WLAN_BACK_RECIPIENT */
181 spin_lock_bh(&sta->lock); 180 spin_lock_bh(&sta->lock);
182 if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK) 181 if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index f3e942486749..e6f3b0c7a71f 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/slab.h>
16#include <linux/if_ether.h> 17#include <linux/if_ether.h>
17#include <linux/skbuff.h> 18#include <linux/skbuff.h>
18#include <linux/if_arp.h> 19#include <linux/if_arp.h>
@@ -264,17 +265,16 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
264 sta->sta.supp_rates[band] = supp_rates | 265 sta->sta.supp_rates[band] = supp_rates |
265 ieee80211_mandatory_rates(local, band); 266 ieee80211_mandatory_rates(local, band);
266 267
268 if (sta->sta.supp_rates[band] != prev_rates) {
267#ifdef CONFIG_MAC80211_IBSS_DEBUG 269#ifdef CONFIG_MAC80211_IBSS_DEBUG
268 if (sta->sta.supp_rates[band] != prev_rates)
269 printk(KERN_DEBUG "%s: updated supp_rates set " 270 printk(KERN_DEBUG "%s: updated supp_rates set "
270 "for %pM based on beacon info (0x%llx | " 271 "for %pM based on beacon/probe_response "
271 "0x%llx -> 0x%llx)\n", 272 "(0x%x -> 0x%x)\n",
272 sdata->name, 273 sdata->name, sta->sta.addr,
273 sta->sta.addr, 274 prev_rates, sta->sta.supp_rates[band]);
274 (unsigned long long) prev_rates,
275 (unsigned long long) supp_rates,
276 (unsigned long long) sta->sta.supp_rates[band]);
277#endif 275#endif
276 rate_control_rate_init(sta);
277 }
278 rcu_read_unlock(); 278 rcu_read_unlock();
279 } else { 279 } else {
280 rcu_read_unlock(); 280 rcu_read_unlock();
@@ -370,6 +370,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
370 sdata->name, mgmt->bssid); 370 sdata->name, mgmt->bssid);
371#endif 371#endif
372 ieee80211_sta_join_ibss(sdata, bss); 372 ieee80211_sta_join_ibss(sdata, bss);
373 supp_rates = ieee80211_sta_get_rates(local, elems, band);
373 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 374 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
374 supp_rates, GFP_KERNEL); 375 supp_rates, GFP_KERNEL);
375 } 376 }
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 241533e1bc03..c9712f35e596 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -317,6 +317,7 @@ enum ieee80211_sta_flags {
317 IEEE80211_STA_MFP_ENABLED = BIT(6), 317 IEEE80211_STA_MFP_ENABLED = BIT(6),
318 IEEE80211_STA_UAPSD_ENABLED = BIT(7), 318 IEEE80211_STA_UAPSD_ENABLED = BIT(7),
319 IEEE80211_STA_NULLFUNC_ACKED = BIT(8), 319 IEEE80211_STA_NULLFUNC_ACKED = BIT(8),
320 IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9),
320}; 321};
321 322
322struct ieee80211_if_managed { 323struct ieee80211_if_managed {
@@ -327,7 +328,7 @@ struct ieee80211_if_managed {
327 struct work_struct work; 328 struct work_struct work;
328 struct work_struct monitor_work; 329 struct work_struct monitor_work;
329 struct work_struct chswitch_work; 330 struct work_struct chswitch_work;
330 struct work_struct beacon_loss_work; 331 struct work_struct beacon_connection_loss_work;
331 332
332 unsigned long probe_timeout; 333 unsigned long probe_timeout;
333 int probe_send_count; 334 int probe_send_count;
@@ -359,6 +360,24 @@ struct ieee80211_if_managed {
359 int wmm_last_param_set; 360 int wmm_last_param_set;
360 361
361 u8 use_4addr; 362 u8 use_4addr;
363
364 /* Signal strength from the last Beacon frame in the current BSS. */
365 int last_beacon_signal;
366
367 /*
368 * Weighted average of the signal strength from Beacon frames in the
369 * current BSS. This is in units of 1/16 of the signal unit to maintain
370 * accuracy and to speed up calculations, i.e., the value need to be
371 * divided by 16 to get the actual value.
372 */
373 int ave_beacon_signal;
374
375 /*
376 * Last Beacon frame signal strength average (ave_beacon_signal / 16)
377 * that triggered a cqm event. 0 indicates that no event has been
378 * generated for the current association.
379 */
380 int last_cqm_event_signal;
362}; 381};
363 382
364enum ieee80211_ibss_request { 383enum ieee80211_ibss_request {
@@ -646,8 +665,7 @@ struct ieee80211_local {
646 struct work_struct recalc_smps; 665 struct work_struct recalc_smps;
647 666
648 /* aggregated multicast list */ 667 /* aggregated multicast list */
649 struct dev_addr_list *mc_list; 668 struct netdev_hw_addr_list mc_list;
650 int mc_count;
651 669
652 bool tim_in_locked_section; /* see ieee80211_beacon_get() */ 670 bool tim_in_locked_section; /* see ieee80211_beacon_get() */
653 671
@@ -745,6 +763,7 @@ struct ieee80211_local {
745 int scan_channel_idx; 763 int scan_channel_idx;
746 int scan_ies_len; 764 int scan_ies_len;
747 765
766 unsigned long leave_oper_channel_time;
748 enum mac80211_scan_state next_scan_state; 767 enum mac80211_scan_state next_scan_state;
749 struct delayed_work scan_work; 768 struct delayed_work scan_work;
750 struct ieee80211_sub_if_data *scan_sdata; 769 struct ieee80211_sub_if_data *scan_sdata;
@@ -1078,8 +1097,6 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
1078 enum ieee80211_smps_mode smps, const u8 *da, 1097 enum ieee80211_smps_mode smps, const u8 *da,
1079 const u8 *bssid); 1098 const u8 *bssid);
1080 1099
1081void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da,
1082 u16 tid, u16 initiator, u16 reason);
1083void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, 1100void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
1084 u16 initiator, u16 reason); 1101 u16 initiator, u16 reason);
1085void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta); 1102void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta);
@@ -1155,7 +1172,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
1155 int powersave); 1172 int powersave);
1156void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, 1173void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
1157 struct ieee80211_hdr *hdr); 1174 struct ieee80211_hdr *hdr);
1158void ieee80211_beacon_loss_work(struct work_struct *work); 1175void ieee80211_beacon_connection_loss_work(struct work_struct *work);
1159 1176
1160void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, 1177void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
1161 enum queue_stop_reason reason); 1178 enum queue_stop_reason reason);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 0793d7a8d743..50deb017fd6e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -10,6 +10,7 @@
10 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13#include <linux/slab.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/if_arp.h> 15#include <linux/if_arp.h>
15#include <linux/netdevice.h> 16#include <linux/netdevice.h>
@@ -412,8 +413,7 @@ static int ieee80211_stop(struct net_device *dev)
412 413
413 netif_addr_lock_bh(dev); 414 netif_addr_lock_bh(dev);
414 spin_lock_bh(&local->filter_lock); 415 spin_lock_bh(&local->filter_lock);
415 __dev_addr_unsync(&local->mc_list, &local->mc_count, 416 __hw_addr_unsync(&local->mc_list, &dev->mc, dev->addr_len);
416 &dev->mc_list, &dev->mc_count);
417 spin_unlock_bh(&local->filter_lock); 417 spin_unlock_bh(&local->filter_lock);
418 netif_addr_unlock_bh(dev); 418 netif_addr_unlock_bh(dev);
419 419
@@ -486,7 +486,7 @@ static int ieee80211_stop(struct net_device *dev)
486 cancel_work_sync(&sdata->u.mgd.work); 486 cancel_work_sync(&sdata->u.mgd.work);
487 cancel_work_sync(&sdata->u.mgd.chswitch_work); 487 cancel_work_sync(&sdata->u.mgd.chswitch_work);
488 cancel_work_sync(&sdata->u.mgd.monitor_work); 488 cancel_work_sync(&sdata->u.mgd.monitor_work);
489 cancel_work_sync(&sdata->u.mgd.beacon_loss_work); 489 cancel_work_sync(&sdata->u.mgd.beacon_connection_loss_work);
490 490
491 /* 491 /*
492 * When we get here, the interface is marked down. 492 * When we get here, the interface is marked down.
@@ -596,8 +596,7 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
596 sdata->flags ^= IEEE80211_SDATA_PROMISC; 596 sdata->flags ^= IEEE80211_SDATA_PROMISC;
597 } 597 }
598 spin_lock_bh(&local->filter_lock); 598 spin_lock_bh(&local->filter_lock);
599 __dev_addr_sync(&local->mc_list, &local->mc_count, 599 __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len);
600 &dev->mc_list, &dev->mc_count);
601 spin_unlock_bh(&local->filter_lock); 600 spin_unlock_bh(&local->filter_lock);
602 ieee80211_queue_work(&local->hw, &local->reconfig_filter); 601 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
603} 602}
@@ -815,6 +814,118 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
815 return 0; 814 return 0;
816} 815}
817 816
817static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
818 struct net_device *dev,
819 enum nl80211_iftype type)
820{
821 struct ieee80211_sub_if_data *sdata;
822 u64 mask, start, addr, val, inc;
823 u8 *m;
824 u8 tmp_addr[ETH_ALEN];
825 int i;
826
827 /* default ... something at least */
828 memcpy(dev->perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
829
830 if (is_zero_ether_addr(local->hw.wiphy->addr_mask) &&
831 local->hw.wiphy->n_addresses <= 1)
832 return;
833
834
835 mutex_lock(&local->iflist_mtx);
836
837 switch (type) {
838 case NL80211_IFTYPE_MONITOR:
839 /* doesn't matter */
840 break;
841 case NL80211_IFTYPE_WDS:
842 case NL80211_IFTYPE_AP_VLAN:
843 /* match up with an AP interface */
844 list_for_each_entry(sdata, &local->interfaces, list) {
845 if (sdata->vif.type != NL80211_IFTYPE_AP)
846 continue;
847 memcpy(dev->perm_addr, sdata->vif.addr, ETH_ALEN);
848 break;
849 }
850 /* keep default if no AP interface present */
851 break;
852 default:
853 /* assign a new address if possible -- try n_addresses first */
854 for (i = 0; i < local->hw.wiphy->n_addresses; i++) {
855 bool used = false;
856
857 list_for_each_entry(sdata, &local->interfaces, list) {
858 if (memcmp(local->hw.wiphy->addresses[i].addr,
859 sdata->vif.addr, ETH_ALEN) == 0) {
860 used = true;
861 break;
862 }
863 }
864
865 if (!used) {
866 memcpy(dev->perm_addr,
867 local->hw.wiphy->addresses[i].addr,
868 ETH_ALEN);
869 break;
870 }
871 }
872
873 /* try mask if available */
874 if (is_zero_ether_addr(local->hw.wiphy->addr_mask))
875 break;
876
877 m = local->hw.wiphy->addr_mask;
878 mask = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) |
879 ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) |
880 ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8);
881
882 if (__ffs64(mask) + hweight64(mask) != fls64(mask)) {
883 /* not a contiguous mask ... not handled now! */
884 printk(KERN_DEBUG "not contiguous\n");
885 break;
886 }
887
888 m = local->hw.wiphy->perm_addr;
889 start = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) |
890 ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) |
891 ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8);
892
893 inc = 1ULL<<__ffs64(mask);
894 val = (start & mask);
895 addr = (start & ~mask) | (val & mask);
896 do {
897 bool used = false;
898
899 tmp_addr[5] = addr >> 0*8;
900 tmp_addr[4] = addr >> 1*8;
901 tmp_addr[3] = addr >> 2*8;
902 tmp_addr[2] = addr >> 3*8;
903 tmp_addr[1] = addr >> 4*8;
904 tmp_addr[0] = addr >> 5*8;
905
906 val += inc;
907
908 list_for_each_entry(sdata, &local->interfaces, list) {
909 if (memcmp(tmp_addr, sdata->vif.addr,
910 ETH_ALEN) == 0) {
911 used = true;
912 break;
913 }
914 }
915
916 if (!used) {
917 memcpy(dev->perm_addr, tmp_addr, ETH_ALEN);
918 break;
919 }
920 addr = (start & ~mask) | (val & mask);
921 } while (addr != start);
922
923 break;
924 }
925
926 mutex_unlock(&local->iflist_mtx);
927}
928
818int ieee80211_if_add(struct ieee80211_local *local, const char *name, 929int ieee80211_if_add(struct ieee80211_local *local, const char *name,
819 struct net_device **new_dev, enum nl80211_iftype type, 930 struct net_device **new_dev, enum nl80211_iftype type,
820 struct vif_params *params) 931 struct vif_params *params)
@@ -844,8 +955,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
844 if (ret < 0) 955 if (ret < 0)
845 goto fail; 956 goto fail;
846 957
847 memcpy(ndev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN); 958 ieee80211_assign_perm_addr(local, ndev, type);
848 memcpy(ndev->perm_addr, ndev->dev_addr, ETH_ALEN); 959 memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
849 SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); 960 SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
850 961
851 /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ 962 /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 8160d9c5372e..e8f6e3b252d8 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -14,6 +14,7 @@
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/rcupdate.h> 15#include <linux/rcupdate.h>
16#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
17#include <linux/slab.h>
17#include <net/mac80211.h> 18#include <net/mac80211.h>
18#include "ieee80211_i.h" 19#include "ieee80211_i.h"
19#include "driver-ops.h" 20#include "driver-ops.h"
diff --git a/net/mac80211/led.c b/net/mac80211/led.c
index 162a643f16b6..063aad944246 100644
--- a/net/mac80211/led.c
+++ b/net/mac80211/led.c
@@ -8,6 +8,7 @@
8 8
9/* just for IFNAMSIZ */ 9/* just for IFNAMSIZ */
10#include <linux/if.h> 10#include <linux/if.h>
11#include <linux/slab.h>
11#include "led.h" 12#include "led.h"
12 13
13void ieee80211_led_rx(struct ieee80211_local *local) 14void ieee80211_led_rx(struct ieee80211_local *local)
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 06c33b68d8e5..011ee85bcd57 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -71,7 +71,7 @@ void ieee80211_configure_filter(struct ieee80211_local *local)
71 spin_lock_bh(&local->filter_lock); 71 spin_lock_bh(&local->filter_lock);
72 changed_flags = local->filter_flags ^ new_flags; 72 changed_flags = local->filter_flags ^ new_flags;
73 73
74 mc = drv_prepare_multicast(local, local->mc_count, local->mc_list); 74 mc = drv_prepare_multicast(local, &local->mc_list);
75 spin_unlock_bh(&local->filter_lock); 75 spin_unlock_bh(&local->filter_lock);
76 76
77 /* be a bit nasty */ 77 /* be a bit nasty */
@@ -225,11 +225,11 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
225 switch (sdata->vif.type) { 225 switch (sdata->vif.type) {
226 case NL80211_IFTYPE_AP: 226 case NL80211_IFTYPE_AP:
227 sdata->vif.bss_conf.enable_beacon = 227 sdata->vif.bss_conf.enable_beacon =
228 !!rcu_dereference(sdata->u.ap.beacon); 228 !!sdata->u.ap.beacon;
229 break; 229 break;
230 case NL80211_IFTYPE_ADHOC: 230 case NL80211_IFTYPE_ADHOC:
231 sdata->vif.bss_conf.enable_beacon = 231 sdata->vif.bss_conf.enable_beacon =
232 !!rcu_dereference(sdata->u.ibss.presp); 232 !!sdata->u.ibss.presp;
233 break; 233 break;
234 case NL80211_IFTYPE_MESH_POINT: 234 case NL80211_IFTYPE_MESH_POINT:
235 sdata->vif.bss_conf.enable_beacon = true; 235 sdata->vif.bss_conf.enable_beacon = true;
@@ -309,6 +309,8 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
309{ 309{
310 struct ieee80211_local *local = hw_to_local(hw); 310 struct ieee80211_local *local = hw_to_local(hw);
311 311
312 trace_api_restart_hw(local);
313
312 /* use this reason, __ieee80211_resume will unblock it */ 314 /* use this reason, __ieee80211_resume will unblock it */
313 ieee80211_stop_queues_by_reason(hw, 315 ieee80211_stop_queues_by_reason(hw,
314 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 316 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
@@ -388,6 +390,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
388 local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN; 390 local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;
389 391
390 INIT_LIST_HEAD(&local->interfaces); 392 INIT_LIST_HEAD(&local->interfaces);
393
394 __hw_addr_init(&local->mc_list);
395
391 mutex_init(&local->iflist_mtx); 396 mutex_init(&local->iflist_mtx);
392 mutex_init(&local->scan_mtx); 397 mutex_init(&local->scan_mtx);
393 398
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 61080c5fad50..7e93524459fc 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -8,6 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <linux/slab.h>
11#include <asm/unaligned.h> 12#include <asm/unaligned.h>
12#include "ieee80211_i.h" 13#include "ieee80211_i.h"
13#include "mesh.h" 14#include "mesh.h"
@@ -600,10 +601,10 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
600 struct ieee80211_rx_status *rx_status) 601 struct ieee80211_rx_status *rx_status)
601{ 602{
602 switch (mgmt->u.action.category) { 603 switch (mgmt->u.action.category) {
603 case MESH_PLINK_CATEGORY: 604 case WLAN_CATEGORY_MESH_PLINK:
604 mesh_rx_plink_frame(sdata, mgmt, len, rx_status); 605 mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
605 break; 606 break;
606 case MESH_PATH_SEL_CATEGORY: 607 case WLAN_CATEGORY_MESH_PATH_SEL:
607 mesh_rx_path_sel_frame(sdata, mgmt, len); 608 mesh_rx_path_sel_frame(sdata, mgmt, len);
608 break; 609 break;
609 } 610 }
@@ -749,9 +750,6 @@ ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
749 750
750 switch (fc & IEEE80211_FCTL_STYPE) { 751 switch (fc & IEEE80211_FCTL_STYPE) {
751 case IEEE80211_STYPE_ACTION: 752 case IEEE80211_STYPE_ACTION:
752 if (skb->len < IEEE80211_MIN_ACTION_SIZE)
753 return RX_DROP_MONITOR;
754 /* fall through */
755 case IEEE80211_STYPE_PROBE_RESP: 753 case IEEE80211_STYPE_PROBE_RESP:
756 case IEEE80211_STYPE_BEACON: 754 case IEEE80211_STYPE_BEACON:
757 skb_queue_tail(&ifmsh->skb_queue, skb); 755 skb_queue_tail(&ifmsh->skb_queue, skb);
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 85562c59d7d6..c88087f1cd0f 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -209,8 +209,6 @@ struct mesh_rmc {
209#define MESH_MAX_MPATHS 1024 209#define MESH_MAX_MPATHS 1024
210 210
211/* Pending ANA approval */ 211/* Pending ANA approval */
212#define MESH_PLINK_CATEGORY 30
213#define MESH_PATH_SEL_CATEGORY 32
214#define MESH_PATH_SEL_ACTION 0 212#define MESH_PATH_SEL_ACTION 0
215 213
216/* PERR reason codes */ 214/* PERR reason codes */
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index ce84237ebad3..d89ed7f2592b 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -7,6 +7,7 @@
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9 9
10#include <linux/slab.h>
10#include "mesh.h" 11#include "mesh.h"
11 12
12#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG 13#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG
@@ -131,7 +132,7 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
131 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); 132 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
132 /* BSSID == SA */ 133 /* BSSID == SA */
133 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); 134 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
134 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; 135 mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL;
135 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; 136 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
136 137
137 switch (action) { 138 switch (action) {
@@ -224,7 +225,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
224 memcpy(mgmt->da, ra, ETH_ALEN); 225 memcpy(mgmt->da, ra, ETH_ALEN);
225 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); 226 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
226 /* BSSID is left zeroed, wildcard value */ 227 /* BSSID is left zeroed, wildcard value */
227 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; 228 mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL;
228 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; 229 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
229 ie_len = 15; 230 ie_len = 15;
230 pos = skb_put(skb, 2 + ie_len); 231 pos = skb_put(skb, 2 + ie_len);
@@ -391,7 +392,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
391 if (SN_GT(mpath->sn, orig_sn) || 392 if (SN_GT(mpath->sn, orig_sn) ||
392 (mpath->sn == orig_sn && 393 (mpath->sn == orig_sn &&
393 action == MPATH_PREQ && 394 action == MPATH_PREQ &&
394 new_metric > mpath->metric)) { 395 new_metric >= mpath->metric)) {
395 process = false; 396 process = false;
396 fresh_info = false; 397 fresh_info = false;
397 } 398 }
@@ -611,7 +612,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
611 612
612 mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, 613 mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
613 cpu_to_le32(orig_sn), 0, target_addr, 614 cpu_to_le32(orig_sn), 0, target_addr,
614 cpu_to_le32(target_sn), mpath->next_hop->sta.addr, hopcount, 615 cpu_to_le32(target_sn), next_hop, hopcount,
615 ttl, cpu_to_le32(lifetime), cpu_to_le32(metric), 616 ttl, cpu_to_le32(lifetime), cpu_to_le32(metric),
616 0, sdata); 617 0, sdata);
617 rcu_read_unlock(); 618 rcu_read_unlock();
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 2312efe04c62..181ffd6efd81 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -10,6 +10,7 @@
10#include <linux/etherdevice.h> 10#include <linux/etherdevice.h>
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/random.h> 12#include <linux/random.h>
13#include <linux/slab.h>
13#include <linux/spinlock.h> 14#include <linux/spinlock.h>
14#include <linux/string.h> 15#include <linux/string.h>
15#include <net/mac80211.h> 16#include <net/mac80211.h>
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 1a29c4a8139e..3cd5f7b5d693 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -6,6 +6,7 @@
6 * it under the terms of the GNU General Public License version 2 as 6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9#include <linux/gfp.h>
9#include <linux/kernel.h> 10#include <linux/kernel.h>
10#include <linux/random.h> 11#include <linux/random.h>
11#include "ieee80211_i.h" 12#include "ieee80211_i.h"
@@ -171,7 +172,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
171 memcpy(mgmt->da, da, ETH_ALEN); 172 memcpy(mgmt->da, da, ETH_ALEN);
172 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); 173 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
173 /* BSSID is left zeroed, wildcard value */ 174 /* BSSID is left zeroed, wildcard value */
174 mgmt->u.action.category = MESH_PLINK_CATEGORY; 175 mgmt->u.action.category = WLAN_CATEGORY_MESH_PLINK;
175 mgmt->u.action.u.plink_action.action_code = action; 176 mgmt->u.action.u.plink_action.action_code = action;
176 177
177 if (action == PLINK_CLOSE) 178 if (action == PLINK_CLOSE)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index be5f723d643a..35d850223a75 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -19,6 +19,7 @@
19#include <linux/rtnetlink.h> 19#include <linux/rtnetlink.h>
20#include <linux/pm_qos_params.h> 20#include <linux/pm_qos_params.h>
21#include <linux/crc32.h> 21#include <linux/crc32.h>
22#include <linux/slab.h>
22#include <net/mac80211.h> 23#include <net/mac80211.h>
23#include <asm/unaligned.h> 24#include <asm/unaligned.h>
24 25
@@ -46,6 +47,13 @@
46 */ 47 */
47#define IEEE80211_PROBE_WAIT (HZ / 2) 48#define IEEE80211_PROBE_WAIT (HZ / 2)
48 49
50/*
51 * Weight given to the latest Beacon frame when calculating average signal
52 * strength for Beacon frames received in the current BSS. This must be
53 * between 1 and 15.
54 */
55#define IEEE80211_SIGNAL_AVE_WEIGHT 3
56
49#define TMR_RUNNING_TIMER 0 57#define TMR_RUNNING_TIMER 0
50#define TMR_RUNNING_CHANSW 1 58#define TMR_RUNNING_CHANSW 1
51 59
@@ -203,7 +211,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
203 211
204static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, 212static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
205 const u8 *bssid, u16 stype, u16 reason, 213 const u8 *bssid, u16 stype, u16 reason,
206 void *cookie) 214 void *cookie, bool send_frame)
207{ 215{
208 struct ieee80211_local *local = sdata->local; 216 struct ieee80211_local *local = sdata->local;
209 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 217 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -240,7 +248,11 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
240 cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); 248 cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
241 if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED)) 249 if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
242 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 250 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
243 ieee80211_tx_skb(sdata, skb); 251
252 if (send_frame)
253 ieee80211_tx_skb(sdata, skb);
254 else
255 kfree_skb(skb);
244} 256}
245 257
246void ieee80211_send_pspoll(struct ieee80211_local *local, 258void ieee80211_send_pspoll(struct ieee80211_local *local,
@@ -589,6 +601,9 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
589 int count; 601 int count;
590 u8 *pos, uapsd_queues = 0; 602 u8 *pos, uapsd_queues = 0;
591 603
604 if (!local->ops->conf_tx)
605 return;
606
592 if (local->hw.queues < 4) 607 if (local->hw.queues < 4)
593 return; 608 return;
594 609
@@ -663,11 +678,15 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
663 params.aifs, params.cw_min, params.cw_max, params.txop, 678 params.aifs, params.cw_min, params.cw_max, params.txop,
664 params.uapsd); 679 params.uapsd);
665#endif 680#endif
666 if (drv_conf_tx(local, queue, &params) && local->ops->conf_tx) 681 if (drv_conf_tx(local, queue, &params))
667 printk(KERN_DEBUG "%s: failed to set TX queue " 682 printk(KERN_DEBUG "%s: failed to set TX queue "
668 "parameters for queue %d\n", 683 "parameters for queue %d\n",
669 wiphy_name(local->hw.wiphy), queue); 684 wiphy_name(local->hw.wiphy), queue);
670 } 685 }
686
687 /* enable WMM or activate new settings */
688 local->hw.conf.flags |= IEEE80211_CONF_QOS;
689 drv_config(local, IEEE80211_CONF_CHANGE_QOS);
671} 690}
672 691
673static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, 692static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
@@ -728,6 +747,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
728 sdata->u.mgd.associated = cbss; 747 sdata->u.mgd.associated = cbss;
729 memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN); 748 memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN);
730 749
750 sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE;
751
731 /* just to be sure */ 752 /* just to be sure */
732 sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | 753 sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
733 IEEE80211_STA_BEACON_POLL); 754 IEEE80211_STA_BEACON_POLL);
@@ -753,6 +774,11 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
753 /* And the BSSID changed - we're associated now */ 774 /* And the BSSID changed - we're associated now */
754 bss_info_changed |= BSS_CHANGED_BSSID; 775 bss_info_changed |= BSS_CHANGED_BSSID;
755 776
777 /* Tell the driver to monitor connection quality (if supported) */
778 if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) &&
779 sdata->vif.bss_conf.cqm_rssi_thold)
780 bss_info_changed |= BSS_CHANGED_CQM;
781
756 ieee80211_bss_info_change_notify(sdata, bss_info_changed); 782 ieee80211_bss_info_change_notify(sdata, bss_info_changed);
757 783
758 mutex_lock(&local->iflist_mtx); 784 mutex_lock(&local->iflist_mtx);
@@ -764,7 +790,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
764 netif_carrier_on(sdata->dev); 790 netif_carrier_on(sdata->dev);
765} 791}
766 792
767static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata) 793static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
794 bool remove_sta)
768{ 795{
769 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 796 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
770 struct ieee80211_local *local = sdata->local; 797 struct ieee80211_local *local = sdata->local;
@@ -837,7 +864,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata)
837 changed |= BSS_CHANGED_BSSID; 864 changed |= BSS_CHANGED_BSSID;
838 ieee80211_bss_info_change_notify(sdata, changed); 865 ieee80211_bss_info_change_notify(sdata, changed);
839 866
840 sta_info_destroy_addr(sdata, bssid); 867 if (remove_sta)
868 sta_info_destroy_addr(sdata, bssid);
841} 869}
842 870
843void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, 871void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
@@ -854,6 +882,9 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
854 if (is_multicast_ether_addr(hdr->addr1)) 882 if (is_multicast_ether_addr(hdr->addr1))
855 return; 883 return;
856 884
885 if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
886 return;
887
857 mod_timer(&sdata->u.mgd.conn_mon_timer, 888 mod_timer(&sdata->u.mgd.conn_mon_timer,
858 round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME)); 889 round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME));
859} 890}
@@ -931,23 +962,72 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
931 mutex_unlock(&ifmgd->mtx); 962 mutex_unlock(&ifmgd->mtx);
932} 963}
933 964
934void ieee80211_beacon_loss_work(struct work_struct *work) 965static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
966{
967 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
968 struct ieee80211_local *local = sdata->local;
969 u8 bssid[ETH_ALEN];
970
971 mutex_lock(&ifmgd->mtx);
972 if (!ifmgd->associated) {
973 mutex_unlock(&ifmgd->mtx);
974 return;
975 }
976
977 memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
978
979 printk(KERN_DEBUG "Connection to AP %pM lost.\n", bssid);
980
981 ieee80211_set_disassoc(sdata, true);
982 ieee80211_recalc_idle(local);
983 mutex_unlock(&ifmgd->mtx);
984 /*
985 * must be outside lock due to cfg80211,
986 * but that's not a problem.
987 */
988 ieee80211_send_deauth_disassoc(sdata, bssid,
989 IEEE80211_STYPE_DEAUTH,
990 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
991 NULL, true);
992}
993
994void ieee80211_beacon_connection_loss_work(struct work_struct *work)
935{ 995{
936 struct ieee80211_sub_if_data *sdata = 996 struct ieee80211_sub_if_data *sdata =
937 container_of(work, struct ieee80211_sub_if_data, 997 container_of(work, struct ieee80211_sub_if_data,
938 u.mgd.beacon_loss_work); 998 u.mgd.beacon_connection_loss_work);
939 999
940 ieee80211_mgd_probe_ap(sdata, true); 1000 if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
1001 __ieee80211_connection_loss(sdata);
1002 else
1003 ieee80211_mgd_probe_ap(sdata, true);
941} 1004}
942 1005
943void ieee80211_beacon_loss(struct ieee80211_vif *vif) 1006void ieee80211_beacon_loss(struct ieee80211_vif *vif)
944{ 1007{
945 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 1008 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1009 struct ieee80211_hw *hw = &sdata->local->hw;
1010
1011 trace_api_beacon_loss(sdata);
946 1012
947 ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_loss_work); 1013 WARN_ON(hw->flags & IEEE80211_HW_CONNECTION_MONITOR);
1014 ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
948} 1015}
949EXPORT_SYMBOL(ieee80211_beacon_loss); 1016EXPORT_SYMBOL(ieee80211_beacon_loss);
950 1017
1018void ieee80211_connection_loss(struct ieee80211_vif *vif)
1019{
1020 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1021 struct ieee80211_hw *hw = &sdata->local->hw;
1022
1023 trace_api_connection_loss(sdata);
1024
1025 WARN_ON(!(hw->flags & IEEE80211_HW_CONNECTION_MONITOR));
1026 ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
1027}
1028EXPORT_SYMBOL(ieee80211_connection_loss);
1029
1030
951static enum rx_mgmt_action __must_check 1031static enum rx_mgmt_action __must_check
952ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, 1032ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
953 struct ieee80211_mgmt *mgmt, size_t len) 1033 struct ieee80211_mgmt *mgmt, size_t len)
@@ -968,7 +1048,7 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
968 printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n", 1048 printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n",
969 sdata->name, bssid, reason_code); 1049 sdata->name, bssid, reason_code);
970 1050
971 ieee80211_set_disassoc(sdata); 1051 ieee80211_set_disassoc(sdata, true);
972 ieee80211_recalc_idle(sdata->local); 1052 ieee80211_recalc_idle(sdata->local);
973 1053
974 return RX_MGMT_CFG80211_DEAUTH; 1054 return RX_MGMT_CFG80211_DEAUTH;
@@ -998,7 +1078,7 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
998 printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n", 1078 printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n",
999 sdata->name, mgmt->sa, reason_code); 1079 sdata->name, mgmt->sa, reason_code);
1000 1080
1001 ieee80211_set_disassoc(sdata); 1081 ieee80211_set_disassoc(sdata, true);
1002 ieee80211_recalc_idle(sdata->local); 1082 ieee80211_recalc_idle(sdata->local);
1003 return RX_MGMT_CFG80211_DISASSOC; 1083 return RX_MGMT_CFG80211_DISASSOC;
1004} 1084}
@@ -1290,6 +1370,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1290 struct ieee80211_rx_status *rx_status) 1370 struct ieee80211_rx_status *rx_status)
1291{ 1371{
1292 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1372 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1373 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
1293 size_t baselen; 1374 size_t baselen;
1294 struct ieee802_11_elems elems; 1375 struct ieee802_11_elems elems;
1295 struct ieee80211_local *local = sdata->local; 1376 struct ieee80211_local *local = sdata->local;
@@ -1325,6 +1406,41 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1325 if (memcmp(bssid, mgmt->bssid, ETH_ALEN) != 0) 1406 if (memcmp(bssid, mgmt->bssid, ETH_ALEN) != 0)
1326 return; 1407 return;
1327 1408
1409 /* Track average RSSI from the Beacon frames of the current AP */
1410 ifmgd->last_beacon_signal = rx_status->signal;
1411 if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) {
1412 ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE;
1413 ifmgd->ave_beacon_signal = rx_status->signal;
1414 ifmgd->last_cqm_event_signal = 0;
1415 } else {
1416 ifmgd->ave_beacon_signal =
1417 (IEEE80211_SIGNAL_AVE_WEIGHT * rx_status->signal * 16 +
1418 (16 - IEEE80211_SIGNAL_AVE_WEIGHT) *
1419 ifmgd->ave_beacon_signal) / 16;
1420 }
1421 if (bss_conf->cqm_rssi_thold &&
1422 !(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) {
1423 int sig = ifmgd->ave_beacon_signal / 16;
1424 int last_event = ifmgd->last_cqm_event_signal;
1425 int thold = bss_conf->cqm_rssi_thold;
1426 int hyst = bss_conf->cqm_rssi_hyst;
1427 if (sig < thold &&
1428 (last_event == 0 || sig < last_event - hyst)) {
1429 ifmgd->last_cqm_event_signal = sig;
1430 ieee80211_cqm_rssi_notify(
1431 &sdata->vif,
1432 NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
1433 GFP_KERNEL);
1434 } else if (sig > thold &&
1435 (last_event == 0 || sig > last_event + hyst)) {
1436 ifmgd->last_cqm_event_signal = sig;
1437 ieee80211_cqm_rssi_notify(
1438 &sdata->vif,
1439 NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
1440 GFP_KERNEL);
1441 }
1442 }
1443
1328 if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) { 1444 if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
1329#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1445#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1330 if (net_ratelimit()) { 1446 if (net_ratelimit()) {
@@ -1610,7 +1726,7 @@ static void ieee80211_sta_work(struct work_struct *work)
1610 printk(KERN_DEBUG "No probe response from AP %pM" 1726 printk(KERN_DEBUG "No probe response from AP %pM"
1611 " after %dms, disconnecting.\n", 1727 " after %dms, disconnecting.\n",
1612 bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ); 1728 bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
1613 ieee80211_set_disassoc(sdata); 1729 ieee80211_set_disassoc(sdata, true);
1614 ieee80211_recalc_idle(local); 1730 ieee80211_recalc_idle(local);
1615 mutex_unlock(&ifmgd->mtx); 1731 mutex_unlock(&ifmgd->mtx);
1616 /* 1732 /*
@@ -1620,7 +1736,7 @@ static void ieee80211_sta_work(struct work_struct *work)
1620 ieee80211_send_deauth_disassoc(sdata, bssid, 1736 ieee80211_send_deauth_disassoc(sdata, bssid,
1621 IEEE80211_STYPE_DEAUTH, 1737 IEEE80211_STYPE_DEAUTH,
1622 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, 1738 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
1623 NULL); 1739 NULL, true);
1624 mutex_lock(&ifmgd->mtx); 1740 mutex_lock(&ifmgd->mtx);
1625 } 1741 }
1626 } 1742 }
@@ -1637,7 +1753,8 @@ static void ieee80211_sta_bcn_mon_timer(unsigned long data)
1637 if (local->quiescing) 1753 if (local->quiescing)
1638 return; 1754 return;
1639 1755
1640 ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_loss_work); 1756 ieee80211_queue_work(&sdata->local->hw,
1757 &sdata->u.mgd.beacon_connection_loss_work);
1641} 1758}
1642 1759
1643static void ieee80211_sta_conn_mon_timer(unsigned long data) 1760static void ieee80211_sta_conn_mon_timer(unsigned long data)
@@ -1689,7 +1806,7 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
1689 */ 1806 */
1690 1807
1691 cancel_work_sync(&ifmgd->work); 1808 cancel_work_sync(&ifmgd->work);
1692 cancel_work_sync(&ifmgd->beacon_loss_work); 1809 cancel_work_sync(&ifmgd->beacon_connection_loss_work);
1693 if (del_timer_sync(&ifmgd->timer)) 1810 if (del_timer_sync(&ifmgd->timer))
1694 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); 1811 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
1695 1812
@@ -1723,7 +1840,8 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
1723 INIT_WORK(&ifmgd->work, ieee80211_sta_work); 1840 INIT_WORK(&ifmgd->work, ieee80211_sta_work);
1724 INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work); 1841 INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work);
1725 INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); 1842 INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work);
1726 INIT_WORK(&ifmgd->beacon_loss_work, ieee80211_beacon_loss_work); 1843 INIT_WORK(&ifmgd->beacon_connection_loss_work,
1844 ieee80211_beacon_connection_loss_work);
1727 setup_timer(&ifmgd->timer, ieee80211_sta_timer, 1845 setup_timer(&ifmgd->timer, ieee80211_sta_timer,
1728 (unsigned long) sdata); 1846 (unsigned long) sdata);
1729 setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, 1847 setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer,
@@ -1802,6 +1920,9 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
1802 struct ieee80211_work *wk; 1920 struct ieee80211_work *wk;
1803 u16 auth_alg; 1921 u16 auth_alg;
1804 1922
1923 if (req->local_state_change)
1924 return 0; /* no need to update mac80211 state */
1925
1805 switch (req->auth_type) { 1926 switch (req->auth_type) {
1806 case NL80211_AUTHTYPE_OPEN_SYSTEM: 1927 case NL80211_AUTHTYPE_OPEN_SYSTEM:
1807 auth_alg = WLAN_AUTH_OPEN; 1928 auth_alg = WLAN_AUTH_OPEN;
@@ -1910,7 +2031,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
1910 } 2031 }
1911 2032
1912 /* Trying to reassociate - clear previous association state */ 2033 /* Trying to reassociate - clear previous association state */
1913 ieee80211_set_disassoc(sdata); 2034 ieee80211_set_disassoc(sdata, true);
1914 } 2035 }
1915 mutex_unlock(&ifmgd->mtx); 2036 mutex_unlock(&ifmgd->mtx);
1916 2037
@@ -2014,7 +2135,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2014 2135
2015 if (ifmgd->associated == req->bss) { 2136 if (ifmgd->associated == req->bss) {
2016 bssid = req->bss->bssid; 2137 bssid = req->bss->bssid;
2017 ieee80211_set_disassoc(sdata); 2138 ieee80211_set_disassoc(sdata, true);
2018 mutex_unlock(&ifmgd->mtx); 2139 mutex_unlock(&ifmgd->mtx);
2019 } else { 2140 } else {
2020 bool not_auth_yet = false; 2141 bool not_auth_yet = false;
@@ -2057,9 +2178,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2057 printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n", 2178 printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n",
2058 sdata->name, bssid, req->reason_code); 2179 sdata->name, bssid, req->reason_code);
2059 2180
2060 ieee80211_send_deauth_disassoc(sdata, bssid, 2181 ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH,
2061 IEEE80211_STYPE_DEAUTH, req->reason_code, 2182 req->reason_code, cookie,
2062 cookie); 2183 !req->local_state_change);
2063 2184
2064 ieee80211_recalc_idle(sdata->local); 2185 ieee80211_recalc_idle(sdata->local);
2065 2186
@@ -2071,6 +2192,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2071 void *cookie) 2192 void *cookie)
2072{ 2193{
2073 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2194 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2195 u8 bssid[ETH_ALEN];
2074 2196
2075 mutex_lock(&ifmgd->mtx); 2197 mutex_lock(&ifmgd->mtx);
2076 2198
@@ -2088,13 +2210,15 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2088 printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n", 2210 printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n",
2089 sdata->name, req->bss->bssid, req->reason_code); 2211 sdata->name, req->bss->bssid, req->reason_code);
2090 2212
2091 ieee80211_set_disassoc(sdata); 2213 memcpy(bssid, req->bss->bssid, ETH_ALEN);
2214 ieee80211_set_disassoc(sdata, false);
2092 2215
2093 mutex_unlock(&ifmgd->mtx); 2216 mutex_unlock(&ifmgd->mtx);
2094 2217
2095 ieee80211_send_deauth_disassoc(sdata, req->bss->bssid, 2218 ieee80211_send_deauth_disassoc(sdata, req->bss->bssid,
2096 IEEE80211_STYPE_DISASSOC, req->reason_code, 2219 IEEE80211_STYPE_DISASSOC, req->reason_code,
2097 cookie); 2220 cookie, !req->local_state_change);
2221 sta_info_destroy_addr(sdata, bssid);
2098 2222
2099 ieee80211_recalc_idle(sdata->local); 2223 ieee80211_recalc_idle(sdata->local);
2100 2224
@@ -2135,3 +2259,15 @@ int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
2135 *cookie = (unsigned long) skb; 2259 *cookie = (unsigned long) skb;
2136 return 0; 2260 return 0;
2137} 2261}
2262
2263void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
2264 enum nl80211_cqm_rssi_threshold_event rssi_event,
2265 gfp_t gfp)
2266{
2267 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
2268
2269 trace_api_cqm_rssi_notify(sdata, rssi_event);
2270
2271 cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp);
2272}
2273EXPORT_SYMBOL(ieee80211_cqm_rssi_notify);
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 0e64484e861c..75202b295a4e 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -46,7 +46,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
46 46
47 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { 47 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
48 list_for_each_entry_rcu(sta, &local->sta_list, list) { 48 list_for_each_entry_rcu(sta, &local->sta_list, list) {
49 set_sta_flags(sta, WLAN_STA_SUSPEND); 49 set_sta_flags(sta, WLAN_STA_BLOCK_BA);
50 ieee80211_sta_tear_down_BA_sessions(sta); 50 ieee80211_sta_tear_down_BA_sessions(sta);
51 } 51 }
52 } 52 }
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 0b299d236fa1..6d0bd198af19 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/rtnetlink.h> 12#include <linux/rtnetlink.h>
13#include <linux/slab.h>
13#include "rate.h" 14#include "rate.h"
14#include "ieee80211_i.h" 15#include "ieee80211_i.h"
15#include "debugfs.h" 16#include "debugfs.h"
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 6e5d68b4e427..f65ce6dcc8e2 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -50,6 +50,7 @@
50#include <linux/debugfs.h> 50#include <linux/debugfs.h>
51#include <linux/random.h> 51#include <linux/random.h>
52#include <linux/ieee80211.h> 52#include <linux/ieee80211.h>
53#include <linux/slab.h>
53#include <net/mac80211.h> 54#include <net/mac80211.h>
54#include "rate.h" 55#include "rate.h"
55#include "rc80211_minstrel.h" 56#include "rc80211_minstrel.h"
@@ -541,7 +542,7 @@ minstrel_free(void *priv)
541 kfree(priv); 542 kfree(priv);
542} 543}
543 544
544static struct rate_control_ops mac80211_minstrel = { 545struct rate_control_ops mac80211_minstrel = {
545 .name = "minstrel", 546 .name = "minstrel",
546 .tx_status = minstrel_tx_status, 547 .tx_status = minstrel_tx_status,
547 .get_rate = minstrel_get_rate, 548 .get_rate = minstrel_get_rate,
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index 38bf4168fc3a..0f5a83370aa6 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -80,7 +80,18 @@ struct minstrel_priv {
80 unsigned int lookaround_rate_mrr; 80 unsigned int lookaround_rate_mrr;
81}; 81};
82 82
83struct minstrel_debugfs_info {
84 size_t len;
85 char buf[];
86};
87
88extern struct rate_control_ops mac80211_minstrel;
83void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir); 89void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
84void minstrel_remove_sta_debugfs(void *priv, void *priv_sta); 90void minstrel_remove_sta_debugfs(void *priv, void *priv_sta);
85 91
92/* debugfs */
93int minstrel_stats_open(struct inode *inode, struct file *file);
94ssize_t minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos);
95int minstrel_stats_release(struct inode *inode, struct file *file);
96
86#endif 97#endif
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index a715d9454f64..241e76f3fdf2 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -49,24 +49,19 @@
49#include <linux/skbuff.h> 49#include <linux/skbuff.h>
50#include <linux/debugfs.h> 50#include <linux/debugfs.h>
51#include <linux/ieee80211.h> 51#include <linux/ieee80211.h>
52#include <linux/slab.h>
52#include <net/mac80211.h> 53#include <net/mac80211.h>
53#include "rc80211_minstrel.h" 54#include "rc80211_minstrel.h"
54 55
55struct minstrel_stats_info { 56int
56 struct minstrel_sta_info *mi;
57 char buf[4096];
58 size_t len;
59};
60
61static int
62minstrel_stats_open(struct inode *inode, struct file *file) 57minstrel_stats_open(struct inode *inode, struct file *file)
63{ 58{
64 struct minstrel_sta_info *mi = inode->i_private; 59 struct minstrel_sta_info *mi = inode->i_private;
65 struct minstrel_stats_info *ms; 60 struct minstrel_debugfs_info *ms;
66 unsigned int i, tp, prob, eprob; 61 unsigned int i, tp, prob, eprob;
67 char *p; 62 char *p;
68 63
69 ms = kmalloc(sizeof(*ms), GFP_KERNEL); 64 ms = kmalloc(sizeof(*ms) + 4096, GFP_KERNEL);
70 if (!ms) 65 if (!ms)
71 return -ENOMEM; 66 return -ENOMEM;
72 67
@@ -106,36 +101,19 @@ minstrel_stats_open(struct inode *inode, struct file *file)
106 return 0; 101 return 0;
107} 102}
108 103
109static ssize_t 104ssize_t
110minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *o) 105minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos)
111{ 106{
112 struct minstrel_stats_info *ms; 107 struct minstrel_debugfs_info *ms;
113 char *src;
114 108
115 ms = file->private_data; 109 ms = file->private_data;
116 src = ms->buf; 110 return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len);
117
118 len = min(len, ms->len);
119 if (len <= *o)
120 return 0;
121
122 src += *o;
123 len -= *o;
124 *o += len;
125
126 if (copy_to_user(buf, src, len))
127 return -EFAULT;
128
129 return len;
130} 111}
131 112
132static int 113int
133minstrel_stats_release(struct inode *inode, struct file *file) 114minstrel_stats_release(struct inode *inode, struct file *file)
134{ 115{
135 struct minstrel_stats_info *ms = file->private_data; 116 kfree(file->private_data);
136
137 kfree(ms);
138
139 return 0; 117 return 0;
140} 118}
141 119
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index 2652a374974e..aeda65466f3e 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -13,6 +13,7 @@
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/debugfs.h> 15#include <linux/debugfs.h>
16#include <linux/slab.h>
16#include <net/mac80211.h> 17#include <net/mac80211.h>
17#include "rate.h" 18#include "rate.h"
18#include "mesh.h" 19#include "mesh.h"
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
index 45667054a5f3..47438b4a9af5 100644
--- a/net/mac80211/rc80211_pid_debugfs.c
+++ b/net/mac80211/rc80211_pid_debugfs.c
@@ -12,6 +12,7 @@
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/slab.h>
15 16
16#include <net/mac80211.h> 17#include <net/mac80211.h>
17#include "rate.h" 18#include "rate.h"
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index b5c48de81d8b..72efbd87c1eb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/jiffies.h> 12#include <linux/jiffies.h>
13#include <linux/slab.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/skbuff.h> 15#include <linux/skbuff.h>
15#include <linux/netdevice.h> 16#include <linux/netdevice.h>
@@ -38,7 +39,7 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
38{ 39{
39 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) { 40 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
40 if (likely(skb->len > FCS_LEN)) 41 if (likely(skb->len > FCS_LEN))
41 skb_trim(skb, skb->len - FCS_LEN); 42 __pskb_trim(skb, skb->len - FCS_LEN);
42 else { 43 else {
43 /* driver bug */ 44 /* driver bug */
44 WARN_ON(1); 45 WARN_ON(1);
@@ -178,14 +179,6 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
178 pos++; 179 pos++;
179 } 180 }
180 181
181 /* IEEE80211_RADIOTAP_DBM_ANTNOISE */
182 if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
183 *pos = status->noise;
184 rthdr->it_present |=
185 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
186 pos++;
187 }
188
189 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ 182 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
190 183
191 /* IEEE80211_RADIOTAP_ANTENNA */ 184 /* IEEE80211_RADIOTAP_ANTENNA */
@@ -235,6 +228,12 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
235 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 228 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
236 present_fcs_len = FCS_LEN; 229 present_fcs_len = FCS_LEN;
237 230
231 /* make sure hdr->frame_control is on the linear part */
232 if (!pskb_may_pull(origskb, 2)) {
233 dev_kfree_skb(origskb);
234 return NULL;
235 }
236
238 if (!local->monitors) { 237 if (!local->monitors) {
239 if (should_drop_frame(origskb, present_fcs_len)) { 238 if (should_drop_frame(origskb, present_fcs_len)) {
240 dev_kfree_skb(origskb); 239 dev_kfree_skb(origskb);
@@ -492,7 +491,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
492 491
493 if (ieee80211_is_action(hdr->frame_control)) { 492 if (ieee80211_is_action(hdr->frame_control)) {
494 mgmt = (struct ieee80211_mgmt *)hdr; 493 mgmt = (struct ieee80211_mgmt *)hdr;
495 if (mgmt->u.action.category != MESH_PLINK_CATEGORY) 494 if (mgmt->u.action.category != WLAN_CATEGORY_MESH_PLINK)
496 return RX_DROP_MONITOR; 495 return RX_DROP_MONITOR;
497 return RX_CONTINUE; 496 return RX_CONTINUE;
498 } 497 }
@@ -722,14 +721,16 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
722 721
723 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; 722 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
724 723
725 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL) 724 spin_lock(&sta->lock);
726 goto dont_reorder; 725
726 if (!sta->ampdu_mlme.tid_active_rx[tid])
727 goto dont_reorder_unlock;
727 728
728 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; 729 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
729 730
730 /* qos null data frames are excluded */ 731 /* qos null data frames are excluded */
731 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) 732 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
732 goto dont_reorder; 733 goto dont_reorder_unlock;
733 734
734 /* new, potentially un-ordered, ampdu frame - process it */ 735 /* new, potentially un-ordered, ampdu frame - process it */
735 736
@@ -741,15 +742,20 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
741 /* if this mpdu is fragmented - terminate rx aggregation session */ 742 /* if this mpdu is fragmented - terminate rx aggregation session */
742 sc = le16_to_cpu(hdr->seq_ctrl); 743 sc = le16_to_cpu(hdr->seq_ctrl);
743 if (sc & IEEE80211_SCTL_FRAG) { 744 if (sc & IEEE80211_SCTL_FRAG) {
744 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr, 745 spin_unlock(&sta->lock);
745 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP); 746 __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
747 WLAN_REASON_QSTA_REQUIRE_SETUP);
746 dev_kfree_skb(skb); 748 dev_kfree_skb(skb);
747 return; 749 return;
748 } 750 }
749 751
750 if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) 752 if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) {
753 spin_unlock(&sta->lock);
751 return; 754 return;
755 }
752 756
757 dont_reorder_unlock:
758 spin_unlock(&sta->lock);
753 dont_reorder: 759 dont_reorder:
754 __skb_queue_tail(frames, skb); 760 __skb_queue_tail(frames, skb);
755} 761}
@@ -896,6 +902,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
896 rx->key = key; 902 rx->key = key;
897 return RX_CONTINUE; 903 return RX_CONTINUE;
898 } else { 904 } else {
905 u8 keyid;
899 /* 906 /*
900 * The device doesn't give us the IV so we won't be 907 * The device doesn't give us the IV so we won't be
901 * able to look up the key. That's ok though, we 908 * able to look up the key. That's ok though, we
@@ -918,7 +925,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
918 * no need to call ieee80211_wep_get_keyidx, 925 * no need to call ieee80211_wep_get_keyidx,
919 * it verifies a bunch of things we've done already 926 * it verifies a bunch of things we've done already
920 */ 927 */
921 keyidx = rx->skb->data[hdrlen + 3] >> 6; 928 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
929 keyidx = keyid >> 6;
922 930
923 rx->key = rcu_dereference(rx->sdata->keys[keyidx]); 931 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
924 932
@@ -939,6 +947,11 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
939 return RX_DROP_MONITOR; 947 return RX_DROP_MONITOR;
940 } 948 }
941 949
950 if (skb_linearize(rx->skb))
951 return RX_DROP_UNUSABLE;
952
953 hdr = (struct ieee80211_hdr *)rx->skb->data;
954
942 /* Check for weak IVs if possible */ 955 /* Check for weak IVs if possible */
943 if (rx->sta && rx->key->conf.alg == ALG_WEP && 956 if (rx->sta && rx->key->conf.alg == ALG_WEP &&
944 ieee80211_is_data(hdr->frame_control) && 957 ieee80211_is_data(hdr->frame_control) &&
@@ -1077,7 +1090,6 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1077 sta->rx_fragments++; 1090 sta->rx_fragments++;
1078 sta->rx_bytes += rx->skb->len; 1091 sta->rx_bytes += rx->skb->len;
1079 sta->last_signal = status->signal; 1092 sta->last_signal = status->signal;
1080 sta->last_noise = status->noise;
1081 1093
1082 /* 1094 /*
1083 * Change STA power saving mode only at the end of a frame 1095 * Change STA power saving mode only at the end of a frame
@@ -1240,6 +1252,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1240 } 1252 }
1241 I802_DEBUG_INC(rx->local->rx_handlers_fragments); 1253 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1242 1254
1255 if (skb_linearize(rx->skb))
1256 return RX_DROP_UNUSABLE;
1257
1243 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; 1258 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1244 1259
1245 if (frag == 0) { 1260 if (frag == 0) {
@@ -1405,21 +1420,24 @@ static int
1405ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) 1420ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1406{ 1421{
1407 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1422 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1423 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1408 __le16 fc = hdr->frame_control; 1424 __le16 fc = hdr->frame_control;
1409 int res;
1410 1425
1411 res = ieee80211_drop_unencrypted(rx, fc); 1426 /*
1412 if (unlikely(res)) 1427 * Pass through unencrypted frames if the hardware has
1413 return res; 1428 * decrypted them already.
1429 */
1430 if (status->flag & RX_FLAG_DECRYPTED)
1431 return 0;
1414 1432
1415 if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) { 1433 if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
1416 if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && 1434 if (unlikely(!ieee80211_has_protected(fc) &&
1435 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1417 rx->key)) 1436 rx->key))
1418 return -EACCES; 1437 return -EACCES;
1419 /* BIP does not use Protected field, so need to check MMIE */ 1438 /* BIP does not use Protected field, so need to check MMIE */
1420 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && 1439 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1421 ieee80211_get_mmie_keyidx(rx->skb) < 0 && 1440 ieee80211_get_mmie_keyidx(rx->skb) < 0))
1422 rx->key))
1423 return -EACCES; 1441 return -EACCES;
1424 /* 1442 /*
1425 * When using MFP, Action frames are not allowed prior to 1443 * When using MFP, Action frames are not allowed prior to
@@ -1597,6 +1615,9 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1597 skb->dev = dev; 1615 skb->dev = dev;
1598 __skb_queue_head_init(&frame_list); 1616 __skb_queue_head_init(&frame_list);
1599 1617
1618 if (skb_linearize(skb))
1619 return RX_DROP_UNUSABLE;
1620
1600 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, 1621 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1601 rx->sdata->vif.type, 1622 rx->sdata->vif.type,
1602 rx->local->hw.extra_tx_headroom); 1623 rx->local->hw.extra_tx_headroom);
@@ -1795,10 +1816,12 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
1795 if (ieee80211_is_back_req(bar->frame_control)) { 1816 if (ieee80211_is_back_req(bar->frame_control)) {
1796 if (!rx->sta) 1817 if (!rx->sta)
1797 return RX_DROP_MONITOR; 1818 return RX_DROP_MONITOR;
1819 spin_lock(&rx->sta->lock);
1798 tid = le16_to_cpu(bar->control) >> 12; 1820 tid = le16_to_cpu(bar->control) >> 12;
1799 if (rx->sta->ampdu_mlme.tid_state_rx[tid] 1821 if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) {
1800 != HT_AGG_STATE_OPERATIONAL) 1822 spin_unlock(&rx->sta->lock);
1801 return RX_DROP_MONITOR; 1823 return RX_DROP_MONITOR;
1824 }
1802 tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid]; 1825 tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
1803 1826
1804 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4; 1827 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
@@ -1812,6 +1835,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
1812 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num, 1835 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
1813 frames); 1836 frames);
1814 kfree_skb(skb); 1837 kfree_skb(skb);
1838 spin_unlock(&rx->sta->lock);
1815 return RX_QUEUED; 1839 return RX_QUEUED;
1816 } 1840 }
1817 1841
@@ -1973,6 +1997,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1973 goto handled; 1997 goto handled;
1974 } 1998 }
1975 break; 1999 break;
2000 case WLAN_CATEGORY_MESH_PLINK:
2001 case WLAN_CATEGORY_MESH_PATH_SEL:
2002 if (ieee80211_vif_is_mesh(&sdata->vif))
2003 return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
2004 break;
1976 } 2005 }
1977 2006
1978 /* 2007 /*
@@ -2366,29 +2395,42 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2366 struct ieee80211_local *local = hw_to_local(hw); 2395 struct ieee80211_local *local = hw_to_local(hw);
2367 struct ieee80211_sub_if_data *sdata; 2396 struct ieee80211_sub_if_data *sdata;
2368 struct ieee80211_hdr *hdr; 2397 struct ieee80211_hdr *hdr;
2398 __le16 fc;
2369 struct ieee80211_rx_data rx; 2399 struct ieee80211_rx_data rx;
2370 int prepares; 2400 int prepares;
2371 struct ieee80211_sub_if_data *prev = NULL; 2401 struct ieee80211_sub_if_data *prev = NULL;
2372 struct sk_buff *skb_new; 2402 struct sk_buff *skb_new;
2373 struct sta_info *sta, *tmp; 2403 struct sta_info *sta, *tmp;
2374 bool found_sta = false; 2404 bool found_sta = false;
2405 int err = 0;
2375 2406
2376 hdr = (struct ieee80211_hdr *)skb->data; 2407 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
2377 memset(&rx, 0, sizeof(rx)); 2408 memset(&rx, 0, sizeof(rx));
2378 rx.skb = skb; 2409 rx.skb = skb;
2379 rx.local = local; 2410 rx.local = local;
2380 2411
2381 if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control)) 2412 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
2382 local->dot11ReceivedFragmentCount++; 2413 local->dot11ReceivedFragmentCount++;
2383 2414
2384 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) || 2415 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
2385 test_bit(SCAN_OFF_CHANNEL, &local->scanning))) 2416 test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
2386 rx.flags |= IEEE80211_RX_IN_SCAN; 2417 rx.flags |= IEEE80211_RX_IN_SCAN;
2387 2418
2419 if (ieee80211_is_mgmt(fc))
2420 err = skb_linearize(skb);
2421 else
2422 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
2423
2424 if (err) {
2425 dev_kfree_skb(skb);
2426 return;
2427 }
2428
2429 hdr = (struct ieee80211_hdr *)skb->data;
2388 ieee80211_parse_qos(&rx); 2430 ieee80211_parse_qos(&rx);
2389 ieee80211_verify_alignment(&rx); 2431 ieee80211_verify_alignment(&rx);
2390 2432
2391 if (ieee80211_is_data(hdr->frame_control)) { 2433 if (ieee80211_is_data(fc)) {
2392 for_each_sta_info(local, hdr->addr2, sta, tmp) { 2434 for_each_sta_info(local, hdr->addr2, sta, tmp) {
2393 rx.sta = sta; 2435 rx.sta = sta;
2394 found_sta = true; 2436 found_sta = true;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index b822dce97867..e1a3defdf581 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -14,6 +14,9 @@
14 14
15#include <linux/if_arp.h> 15#include <linux/if_arp.h>
16#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
17#include <linux/pm_qos_params.h>
18#include <net/sch_generic.h>
19#include <linux/slab.h>
17#include <net/mac80211.h> 20#include <net/mac80211.h>
18 21
19#include "ieee80211_i.h" 22#include "ieee80211_i.h"
@@ -245,6 +248,8 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
245 struct ieee80211_local *local = hw_to_local(hw); 248 struct ieee80211_local *local = hw_to_local(hw);
246 bool was_hw_scan; 249 bool was_hw_scan;
247 250
251 trace_api_scan_completed(local, aborted);
252
248 mutex_lock(&local->scan_mtx); 253 mutex_lock(&local->scan_mtx);
249 254
250 /* 255 /*
@@ -321,6 +326,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
321 326
322 ieee80211_offchannel_stop_beaconing(local); 327 ieee80211_offchannel_stop_beaconing(local);
323 328
329 local->leave_oper_channel_time = 0;
324 local->next_scan_state = SCAN_DECISION; 330 local->next_scan_state = SCAN_DECISION;
325 local->scan_channel_idx = 0; 331 local->scan_channel_idx = 0;
326 332
@@ -425,11 +431,28 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
425 return rc; 431 return rc;
426} 432}
427 433
434static unsigned long
435ieee80211_scan_get_channel_time(struct ieee80211_channel *chan)
436{
437 /*
438 * TODO: channel switching also consumes quite some time,
439 * add that delay as well to get a better estimation
440 */
441 if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
442 return IEEE80211_PASSIVE_CHANNEL_TIME;
443 return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME;
444}
445
428static int ieee80211_scan_state_decision(struct ieee80211_local *local, 446static int ieee80211_scan_state_decision(struct ieee80211_local *local,
429 unsigned long *next_delay) 447 unsigned long *next_delay)
430{ 448{
431 bool associated = false; 449 bool associated = false;
450 bool tx_empty = true;
451 bool bad_latency;
452 bool listen_int_exceeded;
453 unsigned long min_beacon_int = 0;
432 struct ieee80211_sub_if_data *sdata; 454 struct ieee80211_sub_if_data *sdata;
455 struct ieee80211_channel *next_chan;
433 456
434 /* if no more bands/channels left, complete scan and advance to the idle state */ 457 /* if no more bands/channels left, complete scan and advance to the idle state */
435 if (local->scan_channel_idx >= local->scan_req->n_channels) { 458 if (local->scan_channel_idx >= local->scan_req->n_channels) {
@@ -437,7 +460,11 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
437 return 1; 460 return 1;
438 } 461 }
439 462
440 /* check if at least one STA interface is associated */ 463 /*
464 * check if at least one STA interface is associated,
465 * check if at least one STA interface has pending tx frames
466 * and grab the lowest used beacon interval
467 */
441 mutex_lock(&local->iflist_mtx); 468 mutex_lock(&local->iflist_mtx);
442 list_for_each_entry(sdata, &local->interfaces, list) { 469 list_for_each_entry(sdata, &local->interfaces, list) {
443 if (!ieee80211_sdata_running(sdata)) 470 if (!ieee80211_sdata_running(sdata))
@@ -446,7 +473,16 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
446 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 473 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
447 if (sdata->u.mgd.associated) { 474 if (sdata->u.mgd.associated) {
448 associated = true; 475 associated = true;
449 break; 476
477 if (sdata->vif.bss_conf.beacon_int <
478 min_beacon_int || min_beacon_int == 0)
479 min_beacon_int =
480 sdata->vif.bss_conf.beacon_int;
481
482 if (!qdisc_all_tx_empty(sdata->dev)) {
483 tx_empty = false;
484 break;
485 }
450 } 486 }
451 } 487 }
452 } 488 }
@@ -455,11 +491,34 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
455 if (local->scan_channel) { 491 if (local->scan_channel) {
456 /* 492 /*
457 * we're currently scanning a different channel, let's 493 * we're currently scanning a different channel, let's
458 * switch back to the operating channel now if at least 494 * see if we can scan another channel without interfering
459 * one interface is associated. Otherwise just scan the 495 * with the current traffic situation.
460 * next channel 496 *
497 * Since we don't know if the AP has pending frames for us
498 * we can only check for our tx queues and use the current
499 * pm_qos requirements for rx. Hence, if no tx traffic occurs
500 * at all we will scan as many channels in a row as the pm_qos
501 * latency allows us to. Additionally we also check for the
502 * currently negotiated listen interval to prevent losing
503 * frames unnecessarily.
504 *
505 * Otherwise switch back to the operating channel.
461 */ 506 */
462 if (associated) 507 next_chan = local->scan_req->channels[local->scan_channel_idx];
508
509 bad_latency = time_after(jiffies +
510 ieee80211_scan_get_channel_time(next_chan),
511 local->leave_oper_channel_time +
512 usecs_to_jiffies(pm_qos_requirement(PM_QOS_NETWORK_LATENCY)));
513
514 listen_int_exceeded = time_after(jiffies +
515 ieee80211_scan_get_channel_time(next_chan),
516 local->leave_oper_channel_time +
517 usecs_to_jiffies(min_beacon_int * 1024) *
518 local->hw.conf.listen_interval);
519
520 if (associated && ( !tx_empty || bad_latency ||
521 listen_int_exceeded))
463 local->next_scan_state = SCAN_ENTER_OPER_CHANNEL; 522 local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
464 else 523 else
465 local->next_scan_state = SCAN_SET_CHANNEL; 524 local->next_scan_state = SCAN_SET_CHANNEL;
@@ -491,6 +550,9 @@ static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *loca
491 else 550 else
492 *next_delay = HZ / 10; 551 *next_delay = HZ / 10;
493 552
553 /* remember when we left the operating channel */
554 local->leave_oper_channel_time = jiffies;
555
494 /* advance to the next channel to be scanned */ 556 /* advance to the next channel to be scanned */
495 local->next_scan_state = SCAN_SET_CHANNEL; 557 local->next_scan_state = SCAN_SET_CHANNEL;
496} 558}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 56422d894351..ff0eb948917b 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -93,12 +93,18 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
93 struct ieee80211_local *local = sdata->local; 93 struct ieee80211_local *local = sdata->local;
94 struct sta_info *sta; 94 struct sta_info *sta;
95 95
96 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); 96 sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
97 rcu_read_lock_held() ||
98 lockdep_is_held(&local->sta_lock) ||
99 lockdep_is_held(&local->sta_mtx));
97 while (sta) { 100 while (sta) {
98 if (sta->sdata == sdata && 101 if (sta->sdata == sdata &&
99 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 102 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
100 break; 103 break;
101 sta = rcu_dereference(sta->hnext); 104 sta = rcu_dereference_check(sta->hnext,
105 rcu_read_lock_held() ||
106 lockdep_is_held(&local->sta_lock) ||
107 lockdep_is_held(&local->sta_mtx));
102 } 108 }
103 return sta; 109 return sta;
104} 110}
@@ -113,13 +119,19 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
113 struct ieee80211_local *local = sdata->local; 119 struct ieee80211_local *local = sdata->local;
114 struct sta_info *sta; 120 struct sta_info *sta;
115 121
116 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); 122 sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
123 rcu_read_lock_held() ||
124 lockdep_is_held(&local->sta_lock) ||
125 lockdep_is_held(&local->sta_mtx));
117 while (sta) { 126 while (sta) {
118 if ((sta->sdata == sdata || 127 if ((sta->sdata == sdata ||
119 sta->sdata->bss == sdata->bss) && 128 sta->sdata->bss == sdata->bss) &&
120 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 129 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
121 break; 130 break;
122 sta = rcu_dereference(sta->hnext); 131 sta = rcu_dereference_check(sta->hnext,
132 rcu_read_lock_held() ||
133 lockdep_is_held(&local->sta_lock) ||
134 lockdep_is_held(&local->sta_mtx));
123 } 135 }
124 return sta; 136 return sta;
125} 137}
@@ -238,9 +250,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
238 * enable session_timer's data differentiation. refer to 250 * enable session_timer's data differentiation. refer to
239 * sta_rx_agg_session_timer_expired for useage */ 251 * sta_rx_agg_session_timer_expired for useage */
240 sta->timer_to_tid[i] = i; 252 sta->timer_to_tid[i] = i;
241 /* rx */
242 sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE;
243 sta->ampdu_mlme.tid_rx[i] = NULL;
244 /* tx */ 253 /* tx */
245 sta->ampdu_mlme.tid_state_tx[i] = HT_AGG_STATE_IDLE; 254 sta->ampdu_mlme.tid_state_tx[i] = HT_AGG_STATE_IDLE;
246 sta->ampdu_mlme.tid_tx[i] = NULL; 255 sta->ampdu_mlme.tid_tx[i] = NULL;
@@ -607,7 +616,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
607 struct ieee80211_sub_if_data *sdata; 616 struct ieee80211_sub_if_data *sdata;
608 struct sk_buff *skb; 617 struct sk_buff *skb;
609 unsigned long flags; 618 unsigned long flags;
610 int ret, i; 619 int ret;
611 620
612 might_sleep(); 621 might_sleep();
613 622
@@ -617,6 +626,15 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
617 local = sta->local; 626 local = sta->local;
618 sdata = sta->sdata; 627 sdata = sta->sdata;
619 628
629 /*
630 * Before removing the station from the driver and
631 * rate control, it might still start new aggregation
632 * sessions -- block that to make sure the tear-down
633 * will be sufficient.
634 */
635 set_sta_flags(sta, WLAN_STA_BLOCK_BA);
636 ieee80211_sta_tear_down_BA_sessions(sta);
637
620 spin_lock_irqsave(&local->sta_lock, flags); 638 spin_lock_irqsave(&local->sta_lock, flags);
621 ret = sta_info_hash_del(local, sta); 639 ret = sta_info_hash_del(local, sta);
622 /* this might still be the pending list ... which is fine */ 640 /* this might still be the pending list ... which is fine */
@@ -633,9 +651,6 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
633 * may mean it is removed from hardware which requires that 651 * may mean it is removed from hardware which requires that
634 * the key->sta pointer is still valid, so flush the key todo 652 * the key->sta pointer is still valid, so flush the key todo
635 * list here. 653 * list here.
636 *
637 * ieee80211_key_todo() will synchronize_rcu() so after this
638 * nothing can reference this sta struct any more.
639 */ 654 */
640 ieee80211_key_todo(); 655 ieee80211_key_todo();
641 656
@@ -667,11 +682,17 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
667 sdata = sta->sdata; 682 sdata = sta->sdata;
668 } 683 }
669 684
685 /*
686 * At this point, after we wait for an RCU grace period,
687 * neither mac80211 nor the driver can reference this
688 * sta struct any more except by still existing timers
689 * associated with this station that we clean up below.
690 */
691 synchronize_rcu();
692
670#ifdef CONFIG_MAC80211_MESH 693#ifdef CONFIG_MAC80211_MESH
671 if (ieee80211_vif_is_mesh(&sdata->vif)) { 694 if (ieee80211_vif_is_mesh(&sdata->vif))
672 mesh_accept_plinks_update(sdata); 695 mesh_accept_plinks_update(sdata);
673 del_timer(&sta->plink_timer);
674 }
675#endif 696#endif
676 697
677#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 698#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -698,50 +719,6 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
698 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) 719 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
699 dev_kfree_skb_any(skb); 720 dev_kfree_skb_any(skb);
700 721
701 for (i = 0; i < STA_TID_NUM; i++) {
702 struct tid_ampdu_rx *tid_rx;
703 struct tid_ampdu_tx *tid_tx;
704
705 spin_lock_bh(&sta->lock);
706 tid_rx = sta->ampdu_mlme.tid_rx[i];
707 /* Make sure timer won't free the tid_rx struct, see below */
708 if (tid_rx)
709 tid_rx->shutdown = true;
710
711 spin_unlock_bh(&sta->lock);
712
713 /*
714 * Outside spinlock - shutdown is true now so that the timer
715 * won't free tid_rx, we have to do that now. Can't let the
716 * timer do it because we have to sync the timer outside the
717 * lock that it takes itself.
718 */
719 if (tid_rx) {
720 del_timer_sync(&tid_rx->session_timer);
721 kfree(tid_rx);
722 }
723
724 /*
725 * No need to do such complications for TX agg sessions, the
726 * path leading to freeing the tid_tx struct goes via a call
727 * from the driver, and thus needs to look up the sta struct
728 * again, which cannot be found when we get here. Hence, we
729 * just need to delete the timer and free the aggregation
730 * info; we won't be telling the peer about it then but that
731 * doesn't matter if we're not talking to it again anyway.
732 */
733 tid_tx = sta->ampdu_mlme.tid_tx[i];
734 if (tid_tx) {
735 del_timer_sync(&tid_tx->addba_resp_timer);
736 /*
737 * STA removed while aggregation session being
738 * started? Bit odd, but purge frames anyway.
739 */
740 skb_queue_purge(&tid_tx->pending);
741 kfree(tid_tx);
742 }
743 }
744
745 __sta_info_free(local, sta); 722 __sta_info_free(local, sta);
746 723
747 return 0; 724 return 0;
@@ -980,6 +957,8 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
980{ 957{
981 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 958 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
982 959
960 trace_api_sta_block_awake(sta->local, pubsta, block);
961
983 if (block) 962 if (block)
984 set_sta_flags(sta, WLAN_STA_PS_DRIVER); 963 set_sta_flags(sta, WLAN_STA_PS_DRIVER);
985 else 964 else
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 822d84522937..48a5e80957f0 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -35,8 +35,8 @@
35 * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next 35 * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next
36 * frame to this station is transmitted. 36 * frame to this station is transmitted.
37 * @WLAN_STA_MFP: Management frame protection is used with this STA. 37 * @WLAN_STA_MFP: Management frame protection is used with this STA.
38 * @WLAN_STA_SUSPEND: Set/cleared during a suspend/resume cycle. 38 * @WLAN_STA_BLOCK_BA: Used to deny ADDBA requests (both TX and RX)
39 * Used to deny ADDBA requests (both TX and RX). 39 * during suspend/resume and station removal.
40 * @WLAN_STA_PS_DRIVER: driver requires keeping this station in 40 * @WLAN_STA_PS_DRIVER: driver requires keeping this station in
41 * power-save mode logically to flush frames that might still 41 * power-save mode logically to flush frames that might still
42 * be in the queues 42 * be in the queues
@@ -57,7 +57,7 @@ enum ieee80211_sta_info_flags {
57 WLAN_STA_WDS = 1<<7, 57 WLAN_STA_WDS = 1<<7,
58 WLAN_STA_CLEAR_PS_FILT = 1<<9, 58 WLAN_STA_CLEAR_PS_FILT = 1<<9,
59 WLAN_STA_MFP = 1<<10, 59 WLAN_STA_MFP = 1<<10,
60 WLAN_STA_SUSPEND = 1<<11, 60 WLAN_STA_BLOCK_BA = 1<<11,
61 WLAN_STA_PS_DRIVER = 1<<12, 61 WLAN_STA_PS_DRIVER = 1<<12,
62 WLAN_STA_PSPOLL = 1<<13, 62 WLAN_STA_PSPOLL = 1<<13,
63 WLAN_STA_DISASSOC = 1<<14, 63 WLAN_STA_DISASSOC = 1<<14,
@@ -106,7 +106,6 @@ struct tid_ampdu_tx {
106 * @buf_size: buffer size for incoming A-MPDUs 106 * @buf_size: buffer size for incoming A-MPDUs
107 * @timeout: reset timer value (in TUs). 107 * @timeout: reset timer value (in TUs).
108 * @dialog_token: dialog token for aggregation session 108 * @dialog_token: dialog token for aggregation session
109 * @shutdown: this session is being shut down due to STA removal
110 */ 109 */
111struct tid_ampdu_rx { 110struct tid_ampdu_rx {
112 struct sk_buff **reorder_buf; 111 struct sk_buff **reorder_buf;
@@ -118,7 +117,6 @@ struct tid_ampdu_rx {
118 u16 buf_size; 117 u16 buf_size;
119 u16 timeout; 118 u16 timeout;
120 u8 dialog_token; 119 u8 dialog_token;
121 bool shutdown;
122}; 120};
123 121
124/** 122/**
@@ -156,7 +154,7 @@ enum plink_state {
156 */ 154 */
157struct sta_ampdu_mlme { 155struct sta_ampdu_mlme {
158 /* rx */ 156 /* rx */
159 u8 tid_state_rx[STA_TID_NUM]; 157 bool tid_active_rx[STA_TID_NUM];
160 struct tid_ampdu_rx *tid_rx[STA_TID_NUM]; 158 struct tid_ampdu_rx *tid_rx[STA_TID_NUM];
161 /* tx */ 159 /* tx */
162 u8 tid_state_tx[STA_TID_NUM]; 160 u8 tid_state_tx[STA_TID_NUM];
@@ -200,7 +198,6 @@ struct sta_ampdu_mlme {
200 * @rx_fragments: number of received MPDUs 198 * @rx_fragments: number of received MPDUs
201 * @rx_dropped: number of dropped MPDUs from this STA 199 * @rx_dropped: number of dropped MPDUs from this STA
202 * @last_signal: signal of last received frame from this STA 200 * @last_signal: signal of last received frame from this STA
203 * @last_noise: noise of last received frame from this STA
204 * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue) 201 * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue)
205 * @tx_filtered_count: number of frames the hardware filtered for this STA 202 * @tx_filtered_count: number of frames the hardware filtered for this STA
206 * @tx_retry_failed: number of frames that failed retry 203 * @tx_retry_failed: number of frames that failed retry
@@ -267,7 +264,6 @@ struct sta_info {
267 unsigned long rx_fragments; 264 unsigned long rx_fragments;
268 unsigned long rx_dropped; 265 unsigned long rx_dropped;
269 int last_signal; 266 int last_signal;
270 int last_noise;
271 __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; 267 __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES];
272 268
273 /* Updated from TX status path only, no locking requirements */ 269 /* Updated from TX status path only, no locking requirements */
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 56d5b9a6ec5b..11805a3a626f 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -171,7 +171,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
171 struct net_device *prev_dev = NULL; 171 struct net_device *prev_dev = NULL;
172 struct sta_info *sta, *tmp; 172 struct sta_info *sta, *tmp;
173 int retry_count = -1, i; 173 int retry_count = -1, i;
174 bool injected; 174 bool send_to_cooked;
175 175
176 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { 176 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
177 /* the HW cannot have attempted that rate */ 177 /* the HW cannot have attempted that rate */
@@ -296,11 +296,15 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
296 /* this was a transmitted frame, but now we want to reuse it */ 296 /* this was a transmitted frame, but now we want to reuse it */
297 skb_orphan(skb); 297 skb_orphan(skb);
298 298
299 /* Need to make a copy before skb->cb gets cleared */
300 send_to_cooked = !!(info->flags & IEEE80211_TX_CTL_INJECTED) ||
301 (type != IEEE80211_FTYPE_DATA);
302
299 /* 303 /*
300 * This is a bit racy but we can avoid a lot of work 304 * This is a bit racy but we can avoid a lot of work
301 * with this test... 305 * with this test...
302 */ 306 */
303 if (!local->monitors && !local->cooked_mntrs) { 307 if (!local->monitors && (!send_to_cooked || !local->cooked_mntrs)) {
304 dev_kfree_skb(skb); 308 dev_kfree_skb(skb);
305 return; 309 return;
306 } 310 }
@@ -345,9 +349,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
345 /* for now report the total retry_count */ 349 /* for now report the total retry_count */
346 rthdr->data_retries = retry_count; 350 rthdr->data_retries = retry_count;
347 351
348 /* Need to make a copy before skb->cb gets cleared */
349 injected = !!(info->flags & IEEE80211_TX_CTL_INJECTED);
350
351 /* XXX: is this sufficient for BPF? */ 352 /* XXX: is this sufficient for BPF? */
352 skb_set_mac_header(skb, 0); 353 skb_set_mac_header(skb, 0);
353 skb->ip_summed = CHECKSUM_UNNECESSARY; 354 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -362,8 +363,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
362 continue; 363 continue;
363 364
364 if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) && 365 if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
365 !injected && 366 !send_to_cooked)
366 (type == IEEE80211_FTYPE_DATA))
367 continue; 367 continue;
368 368
369 if (prev_dev) { 369 if (prev_dev) {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index cbe53ed4fb0b..2cb77267f733 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -513,6 +513,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
513 else if (tx->sta && (key = rcu_dereference(tx->sta->key))) 513 else if (tx->sta && (key = rcu_dereference(tx->sta->key)))
514 tx->key = key; 514 tx->key = key;
515 else if (ieee80211_is_mgmt(hdr->frame_control) && 515 else if (ieee80211_is_mgmt(hdr->frame_control) &&
516 is_multicast_ether_addr(hdr->addr1) &&
517 ieee80211_is_robust_mgmt_frame(hdr) &&
516 (key = rcu_dereference(tx->sdata->default_mgmt_key))) 518 (key = rcu_dereference(tx->sdata->default_mgmt_key)))
517 tx->key = key; 519 tx->key = key;
518 else if ((key = rcu_dereference(tx->sdata->default_key))) 520 else if ((key = rcu_dereference(tx->sdata->default_key)))
@@ -1142,13 +1144,12 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1142 1144
1143 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && 1145 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
1144 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) { 1146 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
1145 unsigned long flags;
1146 struct tid_ampdu_tx *tid_tx; 1147 struct tid_ampdu_tx *tid_tx;
1147 1148
1148 qc = ieee80211_get_qos_ctl(hdr); 1149 qc = ieee80211_get_qos_ctl(hdr);
1149 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 1150 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
1150 1151
1151 spin_lock_irqsave(&tx->sta->lock, flags); 1152 spin_lock(&tx->sta->lock);
1152 /* 1153 /*
1153 * XXX: This spinlock could be fairly expensive, but see the 1154 * XXX: This spinlock could be fairly expensive, but see the
1154 * comment in agg-tx.c:ieee80211_agg_tx_operational(). 1155 * comment in agg-tx.c:ieee80211_agg_tx_operational().
@@ -1173,7 +1174,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1173 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1174 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1174 __skb_queue_tail(&tid_tx->pending, skb); 1175 __skb_queue_tail(&tid_tx->pending, skb);
1175 } 1176 }
1176 spin_unlock_irqrestore(&tx->sta->lock, flags); 1177 spin_unlock(&tx->sta->lock);
1177 1178
1178 if (unlikely(queued)) 1179 if (unlikely(queued))
1179 return TX_QUEUED; 1180 return TX_QUEUED;
@@ -1991,6 +1992,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
1991void ieee80211_tx_pending(unsigned long data) 1992void ieee80211_tx_pending(unsigned long data)
1992{ 1993{
1993 struct ieee80211_local *local = (struct ieee80211_local *)data; 1994 struct ieee80211_local *local = (struct ieee80211_local *)data;
1995 struct ieee80211_sub_if_data *sdata;
1994 unsigned long flags; 1996 unsigned long flags;
1995 int i; 1997 int i;
1996 bool txok; 1998 bool txok;
@@ -2010,14 +2012,12 @@ void ieee80211_tx_pending(unsigned long data)
2010 while (!skb_queue_empty(&local->pending[i])) { 2012 while (!skb_queue_empty(&local->pending[i])) {
2011 struct sk_buff *skb = __skb_dequeue(&local->pending[i]); 2013 struct sk_buff *skb = __skb_dequeue(&local->pending[i]);
2012 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2014 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2013 struct ieee80211_sub_if_data *sdata;
2014 2015
2015 if (WARN_ON(!info->control.vif)) { 2016 if (WARN_ON(!info->control.vif)) {
2016 kfree_skb(skb); 2017 kfree_skb(skb);
2017 continue; 2018 continue;
2018 } 2019 }
2019 2020
2020 sdata = vif_to_sdata(info->control.vif);
2021 spin_unlock_irqrestore(&local->queue_stop_reason_lock, 2021 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
2022 flags); 2022 flags);
2023 2023
@@ -2029,6 +2029,11 @@ void ieee80211_tx_pending(unsigned long data)
2029 if (!txok) 2029 if (!txok)
2030 break; 2030 break;
2031 } 2031 }
2032
2033 if (skb_queue_empty(&local->pending[i]))
2034 list_for_each_entry_rcu(sdata, &local->interfaces, list)
2035 netif_tx_wake_queue(
2036 netdev_get_tx_queue(sdata->dev, i));
2032 } 2037 }
2033 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 2038 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
2034 2039
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index c453226f06b2..2b75b4fb68f4 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -270,6 +270,8 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
270 struct ieee80211_local *local = hw_to_local(hw); 270 struct ieee80211_local *local = hw_to_local(hw);
271 struct ieee80211_sub_if_data *sdata; 271 struct ieee80211_sub_if_data *sdata;
272 272
273 trace_wake_queue(local, queue, reason);
274
273 if (WARN_ON(queue >= hw->queues)) 275 if (WARN_ON(queue >= hw->queues))
274 return; 276 return;
275 277
@@ -279,13 +281,13 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
279 /* someone still has this queue stopped */ 281 /* someone still has this queue stopped */
280 return; 282 return;
281 283
282 if (!skb_queue_empty(&local->pending[queue])) 284 if (skb_queue_empty(&local->pending[queue])) {
285 rcu_read_lock();
286 list_for_each_entry_rcu(sdata, &local->interfaces, list)
287 netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
288 rcu_read_unlock();
289 } else
283 tasklet_schedule(&local->tx_pending_tasklet); 290 tasklet_schedule(&local->tx_pending_tasklet);
284
285 rcu_read_lock();
286 list_for_each_entry_rcu(sdata, &local->interfaces, list)
287 netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
288 rcu_read_unlock();
289} 291}
290 292
291void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, 293void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -312,6 +314,8 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
312 struct ieee80211_local *local = hw_to_local(hw); 314 struct ieee80211_local *local = hw_to_local(hw);
313 struct ieee80211_sub_if_data *sdata; 315 struct ieee80211_sub_if_data *sdata;
314 316
317 trace_stop_queue(local, queue, reason);
318
315 if (WARN_ON(queue >= hw->queues)) 319 if (WARN_ON(queue >= hw->queues))
316 return; 320 return;
317 321
@@ -796,6 +800,11 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
796 800
797 drv_conf_tx(local, queue, &qparam); 801 drv_conf_tx(local, queue, &qparam);
798 } 802 }
803
804 /* after reinitialize QoS TX queues setting to default,
805 * disable QoS at all */
806 local->hw.conf.flags &= ~IEEE80211_CONF_QOS;
807 drv_config(local, IEEE80211_CONF_CHANGE_QOS);
799} 808}
800 809
801void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, 810void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
@@ -1097,9 +1106,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1097 */ 1106 */
1098 res = drv_start(local); 1107 res = drv_start(local);
1099 if (res) { 1108 if (res) {
1100 WARN(local->suspended, "Harware became unavailable " 1109 WARN(local->suspended, "Hardware became unavailable "
1101 "upon resume. This is could be a software issue" 1110 "upon resume. This could be a software issue "
1102 "prior to suspend or a hardware issue\n"); 1111 "prior to suspend or a hardware issue.\n");
1103 return res; 1112 return res;
1104 } 1113 }
1105 1114
@@ -1135,7 +1144,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1135 1144
1136 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { 1145 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
1137 list_for_each_entry_rcu(sta, &local->sta_list, list) { 1146 list_for_each_entry_rcu(sta, &local->sta_list, list) {
1138 clear_sta_flags(sta, WLAN_STA_SUSPEND); 1147 clear_sta_flags(sta, WLAN_STA_BLOCK_BA);
1139 } 1148 }
1140 } 1149 }
1141 1150
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 5d745f2d7236..5f3a4113bda1 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -17,6 +17,7 @@
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/scatterlist.h> 19#include <linux/scatterlist.h>
20#include <linux/slab.h>
20#include <asm/unaligned.h> 21#include <asm/unaligned.h>
21 22
22#include <net/mac80211.h> 23#include <net/mac80211.h>
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 1e1ea3007b06..bdb1d05b16fc 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -19,6 +19,7 @@
19#include <linux/if_arp.h> 19#include <linux/if_arp.h>
20#include <linux/etherdevice.h> 20#include <linux/etherdevice.h>
21#include <linux/crc32.h> 21#include <linux/crc32.h>
22#include <linux/slab.h>
22#include <net/mac80211.h> 23#include <net/mac80211.h>
23#include <asm/unaligned.h> 24#include <asm/unaligned.h>
24 25
@@ -919,11 +920,16 @@ static void ieee80211_work_work(struct work_struct *work)
919 run_again(local, jiffies + HZ/2); 920 run_again(local, jiffies + HZ/2);
920 } 921 }
921 922
922 if (list_empty(&local->work_list) && local->scan_req) 923 mutex_lock(&local->scan_mtx);
924
925 if (list_empty(&local->work_list) && local->scan_req &&
926 !local->scanning)
923 ieee80211_queue_delayed_work(&local->hw, 927 ieee80211_queue_delayed_work(&local->hw,
924 &local->scan_work, 928 &local->scan_work,
925 round_jiffies_relative(0)); 929 round_jiffies_relative(0));
926 930
931 mutex_unlock(&local->scan_mtx);
932
927 mutex_unlock(&local->work_mtx); 933 mutex_unlock(&local->work_mtx);
928 934
929 ieee80211_recalc_idle(local); 935 ieee80211_recalc_idle(local);
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index f4971cd45c64..0adbcc941ac9 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -9,10 +9,10 @@
9 9
10#include <linux/netdevice.h> 10#include <linux/netdevice.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/slab.h>
13#include <linux/skbuff.h> 12#include <linux/skbuff.h>
14#include <linux/compiler.h> 13#include <linux/compiler.h>
15#include <linux/ieee80211.h> 14#include <linux/ieee80211.h>
15#include <linux/gfp.h>
16#include <asm/unaligned.h> 16#include <asm/unaligned.h>
17#include <net/mac80211.h> 17#include <net/mac80211.h>
18 18
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 60ec4e4badaa..78b505d33bfb 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -19,6 +19,7 @@
19#include <linux/inetdevice.h> 19#include <linux/inetdevice.h>
20#include <linux/proc_fs.h> 20#include <linux/proc_fs.h>
21#include <linux/mutex.h> 21#include <linux/mutex.h>
22#include <linux/slab.h>
22#include <net/net_namespace.h> 23#include <net/net_namespace.h>
23#include <net/sock.h> 24#include <net/sock.h>
24 25
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 3c7e42735b60..1cb0e834f8ff 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -27,6 +27,7 @@
27#include <linux/in.h> 27#include <linux/in.h>
28#include <linux/ip.h> 28#include <linux/ip.h>
29#include <linux/netfilter.h> 29#include <linux/netfilter.h>
30#include <linux/slab.h>
30#include <net/net_namespace.h> 31#include <net/net_namespace.h>
31#include <net/protocol.h> 32#include <net/protocol.h>
32#include <net/tcp.h> 33#include <net/tcp.h>
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 60bb41a8d8d4..d8f7e8ef67b4 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -32,6 +32,7 @@
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/vmalloc.h> 33#include <linux/vmalloc.h>
34#include <linux/proc_fs.h> /* for proc_net_* */ 34#include <linux/proc_fs.h> /* for proc_net_* */
35#include <linux/slab.h>
35#include <linux/seq_file.h> 36#include <linux/seq_file.h>
36#include <linux/jhash.h> 37#include <linux/jhash.h>
37#include <linux/random.h> 38#include <linux/random.h>
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 44590887a92c..1cd6e3fd058b 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -33,6 +33,7 @@
33#include <linux/tcp.h> 33#include <linux/tcp.h>
34#include <linux/sctp.h> 34#include <linux/sctp.h>
35#include <linux/icmp.h> 35#include <linux/icmp.h>
36#include <linux/slab.h>
36 37
37#include <net/ip.h> 38#include <net/ip.h>
38#include <net/tcp.h> 39#include <net/tcp.h>
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 7ee9c3426f44..36dc1d88c2fa 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -31,6 +31,7 @@
31#include <linux/workqueue.h> 31#include <linux/workqueue.h>
32#include <linux/swap.h> 32#include <linux/swap.h>
33#include <linux/seq_file.h> 33#include <linux/seq_file.h>
34#include <linux/slab.h>
34 35
35#include <linux/netfilter.h> 36#include <linux/netfilter.h>
36#include <linux/netfilter_ipv4.h> 37#include <linux/netfilter_ipv4.h>
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c
index fe3e18834b91..95fd0d14200b 100644
--- a/net/netfilter/ipvs/ip_vs_dh.c
+++ b/net/netfilter/ipvs/ip_vs_dh.c
@@ -39,6 +39,7 @@
39#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 39#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
40 40
41#include <linux/ip.h> 41#include <linux/ip.h>
42#include <linux/slab.h>
42#include <linux/module.h> 43#include <linux/module.h>
43#include <linux/kernel.h> 44#include <linux/kernel.h>
44#include <linux/skbuff.h> 45#include <linux/skbuff.h>
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index 702b53ca937c..ff28801962e0 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/jiffies.h> 19#include <linux/jiffies.h>
20#include <linux/slab.h>
21#include <linux/types.h> 20#include <linux/types.h>
22#include <linux/interrupt.h> 21#include <linux/interrupt.h>
23#include <linux/sysctl.h> 22#include <linux/sysctl.h>
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 9f6328303844..2ae747a376a5 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -32,6 +32,7 @@
32#include <linux/in.h> 32#include <linux/in.h>
33#include <linux/ip.h> 33#include <linux/ip.h>
34#include <linux/netfilter.h> 34#include <linux/netfilter.h>
35#include <linux/gfp.h>
35#include <net/protocol.h> 36#include <net/protocol.h>
36#include <net/tcp.h> 37#include <net/tcp.h>
37#include <asm/unaligned.h> 38#include <asm/unaligned.h>
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 1b9370db2305..94a45213faa6 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -43,6 +43,7 @@
43#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 43#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
44 44
45#include <linux/ip.h> 45#include <linux/ip.h>
46#include <linux/slab.h>
46#include <linux/module.h> 47#include <linux/module.h>
47#include <linux/kernel.h> 48#include <linux/kernel.h>
48#include <linux/skbuff.h> 49#include <linux/skbuff.h>
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index caa58fa1438a..535dc2b419d8 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -46,6 +46,7 @@
46#include <linux/skbuff.h> 46#include <linux/skbuff.h>
47#include <linux/jiffies.h> 47#include <linux/jiffies.h>
48#include <linux/list.h> 48#include <linux/list.h>
49#include <linux/slab.h>
49 50
50/* for sysctl */ 51/* for sysctl */
51#include <linux/fs.h> 52#include <linux/fs.h>
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index 27add971bb13..2d3d5e4b35f8 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -19,6 +19,7 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/skbuff.h> 21#include <linux/skbuff.h>
22#include <linux/gfp.h>
22#include <linux/in.h> 23#include <linux/in.h>
23#include <linux/ip.h> 24#include <linux/ip.h>
24#include <net/protocol.h> 25#include <net/protocol.h>
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 8e6cfd36e6f0..e6cc174fbc06 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -36,6 +36,7 @@
36#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 36#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
37 37
38#include <linux/ip.h> 38#include <linux/ip.h>
39#include <linux/slab.h>
39#include <linux/module.h> 40#include <linux/module.h>
40#include <linux/kernel.h> 41#include <linux/kernel.h>
41#include <linux/skbuff.h> 42#include <linux/skbuff.h>
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 3c115fc19784..30db633f88f1 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/slab.h>
26#include <linux/net.h> 27#include <linux/net.h>
27#include <linux/gcd.h> 28#include <linux/gcd.h>
28 29
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index d0a7b7b05ddb..93c15a107b2c 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -17,6 +17,7 @@
17#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 17#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
18 18
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/slab.h>
20#include <linux/tcp.h> /* for tcphdr */ 21#include <linux/tcp.h> /* for tcphdr */
21#include <net/ip.h> 22#include <net/ip.h>
22#include <net/tcp.h> /* for csum_tcpudp_magic */ 23#include <net/tcp.h> /* for csum_tcpudp_magic */
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index 018f90db511c..ab81b380eae6 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/netfilter.h> 11#include <linux/netfilter.h>
12#include <linux/slab.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
13#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
14 15
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c
index 07d9d8857e5d..372e80f07a81 100644
--- a/net/netfilter/nf_conntrack_amanda.c
+++ b/net/netfilter/nf_conntrack_amanda.c
@@ -16,6 +16,7 @@
16#include <linux/in.h> 16#include <linux/in.h>
17#include <linux/udp.h> 17#include <linux/udp.h>
18#include <linux/netfilter.h> 18#include <linux/netfilter.h>
19#include <linux/gfp.h>
19 20
20#include <net/netfilter/nf_conntrack.h> 21#include <net/netfilter/nf_conntrack.h>
21#include <net/netfilter/nf_conntrack_expect.h> 22#include <net/netfilter/nf_conntrack_expect.h>
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 849614af2322..a94ac3ad02cb 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -18,6 +18,7 @@
18#include <linux/percpu.h> 18#include <linux/percpu.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/netdevice.h> 20#include <linux/netdevice.h>
21#include <linux/slab.h>
21 22
22#include <net/netfilter/nf_conntrack.h> 23#include <net/netfilter/nf_conntrack.h>
23#include <net/netfilter/nf_conntrack_core.h> 24#include <net/netfilter/nf_conntrack_core.h>
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index f0732aa18e4f..2ae3169e7633 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -13,6 +13,7 @@
13#include <linux/moduleparam.h> 13#include <linux/moduleparam.h>
14#include <linux/netfilter.h> 14#include <linux/netfilter.h>
15#include <linux/ip.h> 15#include <linux/ip.h>
16#include <linux/slab.h>
16#include <linux/ipv6.h> 17#include <linux/ipv6.h>
17#include <linux/ctype.h> 18#include <linux/ctype.h>
18#include <linux/inet.h> 19#include <linux/inet.h>
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index a1c8dd917e12..a487c8038044 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -17,6 +17,7 @@
17#include <linux/inet.h> 17#include <linux/inet.h>
18#include <linux/in.h> 18#include <linux/in.h>
19#include <linux/ip.h> 19#include <linux/ip.h>
20#include <linux/slab.h>
20#include <linux/udp.h> 21#include <linux/udp.h>
21#include <linux/tcp.h> 22#include <linux/tcp.h>
22#include <linux/skbuff.h> 23#include <linux/skbuff.h>
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 4509fa6726f8..59e1a4cd4e8b 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -15,7 +15,6 @@
15#include <linux/skbuff.h> 15#include <linux/skbuff.h>
16#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
17#include <linux/stddef.h> 17#include <linux/stddef.h>
18#include <linux/slab.h>
19#include <linux/random.h> 18#include <linux/random.h>
20#include <linux/err.h> 19#include <linux/err.h>
21#include <linux/kernel.h> 20#include <linux/kernel.h>
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 8bd98c84f77e..7673930ca342 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -15,6 +15,7 @@
15#include <linux/ip.h> 15#include <linux/ip.h>
16#include <linux/tcp.h> 16#include <linux/tcp.h>
17#include <linux/netfilter.h> 17#include <linux/netfilter.h>
18#include <linux/slab.h>
18 19
19#include <net/netfilter/nf_conntrack.h> 20#include <net/netfilter/nf_conntrack.h>
20#include <net/netfilter/nf_conntrack_expect.h> 21#include <net/netfilter/nf_conntrack_expect.h>
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 9a0c0d99dbfd..4e55403bf263 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -27,6 +27,7 @@
27#include <linux/netlink.h> 27#include <linux/netlink.h>
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/slab.h>
30 31
31#include <linux/netfilter.h> 32#include <linux/netfilter.h>
32#include <net/netlink.h> 33#include <net/netlink.h>
@@ -589,7 +590,9 @@ nla_put_failure:
589nlmsg_failure: 590nlmsg_failure:
590 kfree_skb(skb); 591 kfree_skb(skb);
591errout: 592errout:
592 nfnetlink_set_err(net, 0, group, -ENOBUFS); 593 if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
594 return -ENOBUFS;
595
593 return 0; 596 return 0;
594} 597}
595#endif /* CONFIG_NF_CONNTRACK_EVENTS */ 598#endif /* CONFIG_NF_CONNTRACK_EVENTS */
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index f71cd5da751c..a6defc793601 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -12,6 +12,7 @@
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/netfilter.h> 13#include <linux/netfilter.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/slab.h>
15#include <linux/mutex.h> 16#include <linux/mutex.h>
16#include <linux/vmalloc.h> 17#include <linux/vmalloc.h>
17#include <linux/stddef.h> 18#include <linux/stddef.h>
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 9a2815549375..5292560d6d4a 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -15,6 +15,7 @@
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <linux/skbuff.h> 16#include <linux/skbuff.h>
17#include <linux/dccp.h> 17#include <linux/dccp.h>
18#include <linux/slab.h>
18 19
19#include <net/net_namespace.h> 20#include <net/net_namespace.h>
20#include <net/netns/generic.h> 21#include <net/netns/generic.h>
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index d899b1a69940..cf616e55ca41 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -31,6 +31,7 @@
31#include <linux/in.h> 31#include <linux/in.h>
32#include <linux/netdevice.h> 32#include <linux/netdevice.h>
33#include <linux/skbuff.h> 33#include <linux/skbuff.h>
34#include <linux/slab.h>
34#include <net/dst.h> 35#include <net/dst.h>
35#include <net/net_namespace.h> 36#include <net/net_namespace.h>
36#include <net/netns/generic.h> 37#include <net/netns/generic.h>
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index dcfecbb81c46..d9e27734b2a2 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -20,6 +20,7 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/moduleparam.h> 21#include <linux/moduleparam.h>
22#include <linux/netfilter.h> 22#include <linux/netfilter.h>
23#include <linux/slab.h>
23#include <linux/in.h> 24#include <linux/in.h>
24#include <linux/tcp.h> 25#include <linux/tcp.h>
25#include <net/netfilter/nf_conntrack.h> 26#include <net/netfilter/nf_conntrack.h>
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 24a42efe62ef..faa8eb3722b9 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/netfilter.h> 10#include <linux/netfilter.h>
11#include <linux/slab.h>
11#include <linux/module.h> 12#include <linux/module.h>
12#include <linux/skbuff.h> 13#include <linux/skbuff.h>
13#include <linux/proc_fs.h> 14#include <linux/proc_fs.h>
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index ba095fd014e5..c49ef219899e 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -1,4 +1,5 @@
1#include <linux/kernel.h> 1#include <linux/kernel.h>
2#include <linux/slab.h>
2#include <linux/init.h> 3#include <linux/init.h>
3#include <linux/module.h> 4#include <linux/module.h>
4#include <linux/proc_fs.h> 5#include <linux/proc_fs.h>
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 0794f9a106ee..39b0e3100575 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -110,9 +110,9 @@ int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid,
110} 110}
111EXPORT_SYMBOL_GPL(nfnetlink_send); 111EXPORT_SYMBOL_GPL(nfnetlink_send);
112 112
113void nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error) 113int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error)
114{ 114{
115 netlink_set_err(net->nfnl, pid, group, error); 115 return netlink_set_err(net->nfnl, pid, group, error);
116} 116}
117EXPORT_SYMBOL_GPL(nfnetlink_set_err); 117EXPORT_SYMBOL_GPL(nfnetlink_set_err);
118 118
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index d9b8fb8ab340..203643fb2c52 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -28,6 +28,7 @@
28#include <linux/list.h> 28#include <linux/list.h>
29#include <linux/jhash.h> 29#include <linux/jhash.h>
30#include <linux/random.h> 30#include <linux/random.h>
31#include <linux/slab.h>
31#include <net/sock.h> 32#include <net/sock.h>
32#include <net/netfilter/nf_log.h> 33#include <net/netfilter/nf_log.h>
33#include <net/netfilter/nfnetlink_log.h> 34#include <net/netfilter/nfnetlink_log.h>
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 08c1a33077a0..12e1ab37fcd8 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -18,6 +18,7 @@
18#include <linux/skbuff.h> 18#include <linux/skbuff.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <linux/slab.h>
21#include <linux/notifier.h> 22#include <linux/notifier.h>
22#include <linux/netdevice.h> 23#include <linux/netdevice.h>
23#include <linux/netfilter.h> 24#include <linux/netfilter.h>
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index edde5c602890..3ae32340d4df 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -22,6 +22,7 @@
22#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/mm.h> 24#include <linux/mm.h>
25#include <linux/slab.h>
25#include <net/net_namespace.h> 26#include <net/net_namespace.h>
26 27
27#include <linux/netfilter/x_tables.h> 28#include <linux/netfilter/x_tables.h>
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 65dd348ae361..c8f547829bad 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/gfp.h>
10#include <linux/skbuff.h> 11#include <linux/skbuff.h>
11#include <linux/selinux.h> 12#include <linux/selinux.h>
12#include <linux/netfilter_ipv4/ip_tables.h> 13#include <linux/netfilter_ipv4/ip_tables.h>
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index bd102c77d1f0..ab6f8ff9c9a7 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -22,6 +22,7 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <linux/netfilter/x_tables.h> 24#include <linux/netfilter/x_tables.h>
25#include <linux/slab.h>
25#include <linux/leds.h> 26#include <linux/leds.h>
26#include <linux/mutex.h> 27#include <linux/mutex.h>
27 28
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 40751c618e70..a02193f06e39 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -11,6 +11,7 @@
11#include <linux/jhash.h> 11#include <linux/jhash.h>
12#include <linux/rtnetlink.h> 12#include <linux/rtnetlink.h>
13#include <linux/random.h> 13#include <linux/random.h>
14#include <linux/slab.h>
14#include <net/gen_stats.h> 15#include <net/gen_stats.h>
15#include <net/netlink.h> 16#include <net/netlink.h>
16 17
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 385677b963d5..d04606459c9d 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -11,6 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/skbuff.h> 12#include <linux/skbuff.h>
13#include <linux/ip.h> 13#include <linux/ip.h>
14#include <linux/gfp.h>
14#include <linux/ipv6.h> 15#include <linux/ipv6.h>
15#include <linux/tcp.h> 16#include <linux/tcp.h>
16#include <net/dst.h> 17#include <net/dst.h>
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 370088ec5764..326bc1b81681 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -17,6 +17,7 @@
17#include <linux/ip.h> 17#include <linux/ip.h>
18#include <linux/ipv6.h> 18#include <linux/ipv6.h>
19#include <linux/jhash.h> 19#include <linux/jhash.h>
20#include <linux/slab.h>
20#include <linux/list.h> 21#include <linux/list.h>
21#include <linux/module.h> 22#include <linux/module.h>
22#include <linux/random.h> 23#include <linux/random.h>
diff --git a/net/netfilter/xt_dccp.c b/net/netfilter/xt_dccp.c
index f54699ca5609..0d260aec487f 100644
--- a/net/netfilter/xt_dccp.c
+++ b/net/netfilter/xt_dccp.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/skbuff.h> 12#include <linux/skbuff.h>
13#include <linux/slab.h>
13#include <linux/spinlock.h> 14#include <linux/spinlock.h>
14#include <net/ip.h> 15#include <net/ip.h>
15#include <linux/dccp.h> 16#include <linux/dccp.h>
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 453178d25cba..0c366d387c8c 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -431,6 +431,7 @@ static void hashlimit_ipv6_mask(__be32 *i, unsigned int p)
431 case 64 ... 95: 431 case 64 ... 95:
432 i[2] = maskl(i[2], p - 64); 432 i[2] = maskl(i[2], p - 64);
433 i[3] = 0; 433 i[3] = 0;
434 break;
434 case 96 ... 127: 435 case 96 ... 127:
435 i[3] = maskl(i[3], p - 96); 436 i[3] = maskl(i[3], p - 96);
436 break; 437 break;
@@ -674,7 +675,8 @@ static void dl_seq_stop(struct seq_file *s, void *v)
674 struct xt_hashlimit_htable *htable = s->private; 675 struct xt_hashlimit_htable *htable = s->private;
675 unsigned int *bucket = (unsigned int *)v; 676 unsigned int *bucket = (unsigned int *)v;
676 677
677 kfree(bucket); 678 if (!IS_ERR(bucket))
679 kfree(bucket);
678 spin_unlock_bh(&htable->lock); 680 spin_unlock_bh(&htable->lock);
679} 681}
680 682
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index e2a284ebb415..88215dca19cb 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -6,6 +6,8 @@
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10#include <linux/slab.h>
9#include <linux/module.h> 11#include <linux/module.h>
10#include <linux/skbuff.h> 12#include <linux/skbuff.h>
11#include <linux/spinlock.h> 13#include <linux/spinlock.h>
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c
index 3e5cbd85a65b..7c95d69f6f06 100644
--- a/net/netfilter/xt_quota.c
+++ b/net/netfilter/xt_quota.c
@@ -4,6 +4,7 @@
4 * Sam Johnston <samj@samj.net> 4 * Sam Johnston <samj@samj.net>
5 */ 5 */
6#include <linux/skbuff.h> 6#include <linux/skbuff.h>
7#include <linux/slab.h>
7#include <linux/spinlock.h> 8#include <linux/spinlock.h>
8 9
9#include <linux/netfilter/x_tables.h> 10#include <linux/netfilter/x_tables.h>
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 0d9f80b1dd9f..b88d63b9c76a 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -28,6 +28,7 @@
28#include <linux/bitops.h> 28#include <linux/bitops.h>
29#include <linux/skbuff.h> 29#include <linux/skbuff.h>
30#include <linux/inet.h> 30#include <linux/inet.h>
31#include <linux/slab.h>
31#include <net/net_namespace.h> 32#include <net/net_namespace.h>
32#include <net/netns/generic.h> 33#include <net/netns/generic.h>
33 34
@@ -284,7 +285,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
284 for (i = 0; i < e->nstamps; i++) { 285 for (i = 0; i < e->nstamps; i++) {
285 if (info->seconds && time_after(time, e->stamps[i])) 286 if (info->seconds && time_after(time, e->stamps[i]))
286 continue; 287 continue;
287 if (info->hit_count && ++hits >= info->hit_count) { 288 if (!info->hit_count || ++hits >= info->hit_count) {
288 ret = !ret; 289 ret = !ret;
289 break; 290 break;
290 } 291 }
diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
index 8ed2b2905091..5aeca1d023d8 100644
--- a/net/netfilter/xt_statistic.c
+++ b/net/netfilter/xt_statistic.c
@@ -12,6 +12,7 @@
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <linux/skbuff.h> 13#include <linux/skbuff.h>
14#include <linux/net.h> 14#include <linux/net.h>
15#include <linux/slab.h>
15 16
16#include <linux/netfilter/xt_statistic.h> 17#include <linux/netfilter/xt_statistic.h>
17#include <linux/netfilter/x_tables.h> 18#include <linux/netfilter/x_tables.h>
diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c
index 488e368a2c4e..f6d5112175e6 100644
--- a/net/netfilter/xt_string.c
+++ b/net/netfilter/xt_string.c
@@ -7,6 +7,7 @@
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9 9
10#include <linux/gfp.h>
10#include <linux/init.h> 11#include <linux/init.h>
11#include <linux/module.h> 12#include <linux/module.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index e639298bc9c8..5f14c8462e30 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -33,6 +33,7 @@
33#include <linux/string.h> 33#include <linux/string.h>
34#include <linux/skbuff.h> 34#include <linux/skbuff.h>
35#include <linux/audit.h> 35#include <linux/audit.h>
36#include <linux/slab.h>
36#include <net/sock.h> 37#include <net/sock.h>
37#include <net/netlink.h> 38#include <net/netlink.h>
38#include <net/genetlink.h> 39#include <net/genetlink.h>
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 0bfeaab88ef5..d37b7f80fa37 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -35,6 +35,7 @@
35#include <linux/spinlock.h> 35#include <linux/spinlock.h>
36#include <linux/string.h> 36#include <linux/string.h>
37#include <linux/audit.h> 37#include <linux/audit.h>
38#include <linux/slab.h>
38#include <net/netlabel.h> 39#include <net/netlabel.h>
39#include <net/cipso_ipv4.h> 40#include <net/cipso_ipv4.h>
40#include <asm/bug.h> 41#include <asm/bug.h>
@@ -50,9 +51,12 @@ struct netlbl_domhsh_tbl {
50}; 51};
51 52
52/* Domain hash table */ 53/* Domain hash table */
53/* XXX - updates should be so rare that having one spinlock for the entire 54/* updates should be so rare that having one spinlock for the entire hash table
54 * hash table should be okay */ 55 * should be okay */
55static DEFINE_SPINLOCK(netlbl_domhsh_lock); 56static DEFINE_SPINLOCK(netlbl_domhsh_lock);
57#define netlbl_domhsh_rcu_deref(p) \
58 rcu_dereference_check(p, rcu_read_lock_held() || \
59 lockdep_is_held(&netlbl_domhsh_lock))
56static struct netlbl_domhsh_tbl *netlbl_domhsh = NULL; 60static struct netlbl_domhsh_tbl *netlbl_domhsh = NULL;
57static struct netlbl_dom_map *netlbl_domhsh_def = NULL; 61static struct netlbl_dom_map *netlbl_domhsh_def = NULL;
58 62
@@ -106,7 +110,8 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
106 * Description: 110 * Description:
107 * This is the hashing function for the domain hash table, it returns the 111 * This is the hashing function for the domain hash table, it returns the
108 * correct bucket number for the domain. The caller is responsibile for 112 * correct bucket number for the domain. The caller is responsibile for
109 * calling the rcu_read_[un]lock() functions. 113 * ensuring that the hash table is protected with either a RCU read lock or the
114 * hash table lock.
110 * 115 *
111 */ 116 */
112static u32 netlbl_domhsh_hash(const char *key) 117static u32 netlbl_domhsh_hash(const char *key)
@@ -120,7 +125,7 @@ static u32 netlbl_domhsh_hash(const char *key)
120 125
121 for (iter = 0, val = 0, len = strlen(key); iter < len; iter++) 126 for (iter = 0, val = 0, len = strlen(key); iter < len; iter++)
122 val = (val << 4 | (val >> (8 * sizeof(u32) - 4))) ^ key[iter]; 127 val = (val << 4 | (val >> (8 * sizeof(u32) - 4))) ^ key[iter];
123 return val & (rcu_dereference(netlbl_domhsh)->size - 1); 128 return val & (netlbl_domhsh_rcu_deref(netlbl_domhsh)->size - 1);
124} 129}
125 130
126/** 131/**
@@ -130,7 +135,8 @@ static u32 netlbl_domhsh_hash(const char *key)
130 * Description: 135 * Description:
131 * Searches the domain hash table and returns a pointer to the hash table 136 * Searches the domain hash table and returns a pointer to the hash table
132 * entry if found, otherwise NULL is returned. The caller is responsibile for 137 * entry if found, otherwise NULL is returned. The caller is responsibile for
133 * the rcu hash table locks (i.e. the caller much call rcu_read_[un]lock()). 138 * ensuring that the hash table is protected with either a RCU read lock or the
139 * hash table lock.
134 * 140 *
135 */ 141 */
136static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) 142static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain)
@@ -141,7 +147,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain)
141 147
142 if (domain != NULL) { 148 if (domain != NULL) {
143 bkt = netlbl_domhsh_hash(domain); 149 bkt = netlbl_domhsh_hash(domain);
144 bkt_list = &rcu_dereference(netlbl_domhsh)->tbl[bkt]; 150 bkt_list = &netlbl_domhsh_rcu_deref(netlbl_domhsh)->tbl[bkt];
145 list_for_each_entry_rcu(iter, bkt_list, list) 151 list_for_each_entry_rcu(iter, bkt_list, list)
146 if (iter->valid && strcmp(iter->domain, domain) == 0) 152 if (iter->valid && strcmp(iter->domain, domain) == 0)
147 return iter; 153 return iter;
@@ -159,8 +165,8 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain)
159 * Searches the domain hash table and returns a pointer to the hash table 165 * Searches the domain hash table and returns a pointer to the hash table
160 * entry if an exact match is found, if an exact match is not present in the 166 * entry if an exact match is found, if an exact match is not present in the
161 * hash table then the default entry is returned if valid otherwise NULL is 167 * hash table then the default entry is returned if valid otherwise NULL is
162 * returned. The caller is responsibile for the rcu hash table locks 168 * returned. The caller is responsibile ensuring that the hash table is
163 * (i.e. the caller much call rcu_read_[un]lock()). 169 * protected with either a RCU read lock or the hash table lock.
164 * 170 *
165 */ 171 */
166static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain) 172static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain)
@@ -169,7 +175,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain)
169 175
170 entry = netlbl_domhsh_search(domain); 176 entry = netlbl_domhsh_search(domain);
171 if (entry == NULL) { 177 if (entry == NULL) {
172 entry = rcu_dereference(netlbl_domhsh_def); 178 entry = netlbl_domhsh_rcu_deref(netlbl_domhsh_def);
173 if (entry != NULL && !entry->valid) 179 if (entry != NULL && !entry->valid)
174 entry = NULL; 180 entry = NULL;
175 } 181 }
@@ -306,8 +312,11 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
306 struct netlbl_af6list *tmp6; 312 struct netlbl_af6list *tmp6;
307#endif /* IPv6 */ 313#endif /* IPv6 */
308 314
315 /* XXX - we can remove this RCU read lock as the spinlock protects the
316 * entire function, but before we do we need to fixup the
317 * netlbl_af[4,6]list RCU functions to do "the right thing" with
318 * respect to rcu_dereference() when only a spinlock is held. */
309 rcu_read_lock(); 319 rcu_read_lock();
310
311 spin_lock(&netlbl_domhsh_lock); 320 spin_lock(&netlbl_domhsh_lock);
312 if (entry->domain != NULL) 321 if (entry->domain != NULL)
313 entry_old = netlbl_domhsh_search(entry->domain); 322 entry_old = netlbl_domhsh_search(entry->domain);
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 6ce00205f342..1b83e0009d8d 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -30,6 +30,7 @@
30 30
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/types.h> 32#include <linux/types.h>
33#include <linux/slab.h>
33#include <linux/audit.h> 34#include <linux/audit.h>
34#include <linux/in.h> 35#include <linux/in.h>
35#include <linux/in6.h> 36#include <linux/in6.h>
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index 8203623e65ad..998e85e895d0 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -34,6 +34,7 @@
34#include <linux/skbuff.h> 34#include <linux/skbuff.h>
35#include <linux/in.h> 35#include <linux/in.h>
36#include <linux/in6.h> 36#include <linux/in6.h>
37#include <linux/slab.h>
37#include <net/sock.h> 38#include <net/sock.h>
38#include <net/netlink.h> 39#include <net/netlink.h>
39#include <net/genetlink.h> 40#include <net/genetlink.h>
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 852d9d7976b9..a3d64aabe2f7 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -43,6 +43,7 @@
43#include <linux/notifier.h> 43#include <linux/notifier.h>
44#include <linux/netdevice.h> 44#include <linux/netdevice.h>
45#include <linux/security.h> 45#include <linux/security.h>
46#include <linux/slab.h>
46#include <net/sock.h> 47#include <net/sock.h>
47#include <net/netlink.h> 48#include <net/netlink.h>
48#include <net/genetlink.h> 49#include <net/genetlink.h>
@@ -114,6 +115,9 @@ struct netlbl_unlhsh_walk_arg {
114/* updates should be so rare that having one spinlock for the entire 115/* updates should be so rare that having one spinlock for the entire
115 * hash table should be okay */ 116 * hash table should be okay */
116static DEFINE_SPINLOCK(netlbl_unlhsh_lock); 117static DEFINE_SPINLOCK(netlbl_unlhsh_lock);
118#define netlbl_unlhsh_rcu_deref(p) \
119 rcu_dereference_check(p, rcu_read_lock_held() || \
120 lockdep_is_held(&netlbl_unlhsh_lock))
117static struct netlbl_unlhsh_tbl *netlbl_unlhsh = NULL; 121static struct netlbl_unlhsh_tbl *netlbl_unlhsh = NULL;
118static struct netlbl_unlhsh_iface *netlbl_unlhsh_def = NULL; 122static struct netlbl_unlhsh_iface *netlbl_unlhsh_def = NULL;
119 123
@@ -235,15 +239,13 @@ static void netlbl_unlhsh_free_iface(struct rcu_head *entry)
235 * Description: 239 * Description:
236 * This is the hashing function for the unlabeled hash table, it returns the 240 * This is the hashing function for the unlabeled hash table, it returns the
237 * bucket number for the given device/interface. The caller is responsible for 241 * bucket number for the given device/interface. The caller is responsible for
238 * calling the rcu_read_[un]lock() functions. 242 * ensuring that the hash table is protected with either a RCU read lock or
243 * the hash table lock.
239 * 244 *
240 */ 245 */
241static u32 netlbl_unlhsh_hash(int ifindex) 246static u32 netlbl_unlhsh_hash(int ifindex)
242{ 247{
243 /* this is taken _almost_ directly from 248 return ifindex & (netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->size - 1);
244 * security/selinux/netif.c:sel_netif_hasfn() as they do pretty much
245 * the same thing */
246 return ifindex & (rcu_dereference(netlbl_unlhsh)->size - 1);
247} 249}
248 250
249/** 251/**
@@ -253,7 +255,8 @@ static u32 netlbl_unlhsh_hash(int ifindex)
253 * Description: 255 * Description:
254 * Searches the unlabeled connection hash table and returns a pointer to the 256 * Searches the unlabeled connection hash table and returns a pointer to the
255 * interface entry which matches @ifindex, otherwise NULL is returned. The 257 * interface entry which matches @ifindex, otherwise NULL is returned. The
256 * caller is responsible for calling the rcu_read_[un]lock() functions. 258 * caller is responsible for ensuring that the hash table is protected with
259 * either a RCU read lock or the hash table lock.
257 * 260 *
258 */ 261 */
259static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex) 262static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex)
@@ -263,7 +266,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex)
263 struct netlbl_unlhsh_iface *iter; 266 struct netlbl_unlhsh_iface *iter;
264 267
265 bkt = netlbl_unlhsh_hash(ifindex); 268 bkt = netlbl_unlhsh_hash(ifindex);
266 bkt_list = &rcu_dereference(netlbl_unlhsh)->tbl[bkt]; 269 bkt_list = &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt];
267 list_for_each_entry_rcu(iter, bkt_list, list) 270 list_for_each_entry_rcu(iter, bkt_list, list)
268 if (iter->valid && iter->ifindex == ifindex) 271 if (iter->valid && iter->ifindex == ifindex)
269 return iter; 272 return iter;
@@ -272,33 +275,6 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex)
272} 275}
273 276
274/** 277/**
275 * netlbl_unlhsh_search_iface_def - Search for a matching interface entry
276 * @ifindex: the network interface
277 *
278 * Description:
279 * Searches the unlabeled connection hash table and returns a pointer to the
280 * interface entry which matches @ifindex. If an exact match can not be found
281 * and there is a valid default entry, the default entry is returned, otherwise
282 * NULL is returned. The caller is responsible for calling the
283 * rcu_read_[un]lock() functions.
284 *
285 */
286static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface_def(int ifindex)
287{
288 struct netlbl_unlhsh_iface *entry;
289
290 entry = netlbl_unlhsh_search_iface(ifindex);
291 if (entry != NULL)
292 return entry;
293
294 entry = rcu_dereference(netlbl_unlhsh_def);
295 if (entry != NULL && entry->valid)
296 return entry;
297
298 return NULL;
299}
300
301/**
302 * netlbl_unlhsh_add_addr4 - Add a new IPv4 address entry to the hash table 278 * netlbl_unlhsh_add_addr4 - Add a new IPv4 address entry to the hash table
303 * @iface: the associated interface entry 279 * @iface: the associated interface entry
304 * @addr: IPv4 address in network byte order 280 * @addr: IPv4 address in network byte order
@@ -308,8 +284,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface_def(int ifindex)
308 * Description: 284 * Description:
309 * Add a new address entry into the unlabeled connection hash table using the 285 * Add a new address entry into the unlabeled connection hash table using the
310 * interface entry specified by @iface. On success zero is returned, otherwise 286 * interface entry specified by @iface. On success zero is returned, otherwise
311 * a negative value is returned. The caller is responsible for calling the 287 * a negative value is returned.
312 * rcu_read_[un]lock() functions.
313 * 288 *
314 */ 289 */
315static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface, 290static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface,
@@ -349,8 +324,7 @@ static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface,
349 * Description: 324 * Description:
350 * Add a new address entry into the unlabeled connection hash table using the 325 * Add a new address entry into the unlabeled connection hash table using the
351 * interface entry specified by @iface. On success zero is returned, otherwise 326 * interface entry specified by @iface. On success zero is returned, otherwise
352 * a negative value is returned. The caller is responsible for calling the 327 * a negative value is returned.
353 * rcu_read_[un]lock() functions.
354 * 328 *
355 */ 329 */
356static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface, 330static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface,
@@ -391,8 +365,7 @@ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface,
391 * Description: 365 * Description:
392 * Add a new, empty, interface entry into the unlabeled connection hash table. 366 * Add a new, empty, interface entry into the unlabeled connection hash table.
393 * On success a pointer to the new interface entry is returned, on failure NULL 367 * On success a pointer to the new interface entry is returned, on failure NULL
394 * is returned. The caller is responsible for calling the rcu_read_[un]lock() 368 * is returned.
395 * functions.
396 * 369 *
397 */ 370 */
398static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex) 371static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex)
@@ -415,10 +388,10 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex)
415 if (netlbl_unlhsh_search_iface(ifindex) != NULL) 388 if (netlbl_unlhsh_search_iface(ifindex) != NULL)
416 goto add_iface_failure; 389 goto add_iface_failure;
417 list_add_tail_rcu(&iface->list, 390 list_add_tail_rcu(&iface->list,
418 &rcu_dereference(netlbl_unlhsh)->tbl[bkt]); 391 &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]);
419 } else { 392 } else {
420 INIT_LIST_HEAD(&iface->list); 393 INIT_LIST_HEAD(&iface->list);
421 if (rcu_dereference(netlbl_unlhsh_def) != NULL) 394 if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL)
422 goto add_iface_failure; 395 goto add_iface_failure;
423 rcu_assign_pointer(netlbl_unlhsh_def, iface); 396 rcu_assign_pointer(netlbl_unlhsh_def, iface);
424 } 397 }
@@ -548,8 +521,7 @@ unlhsh_add_return:
548 * 521 *
549 * Description: 522 * Description:
550 * Remove an IP address entry from the unlabeled connection hash table. 523 * Remove an IP address entry from the unlabeled connection hash table.
551 * Returns zero on success, negative values on failure. The caller is 524 * Returns zero on success, negative values on failure.
552 * responsible for calling the rcu_read_[un]lock() functions.
553 * 525 *
554 */ 526 */
555static int netlbl_unlhsh_remove_addr4(struct net *net, 527static int netlbl_unlhsh_remove_addr4(struct net *net,
@@ -611,8 +583,7 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
611 * 583 *
612 * Description: 584 * Description:
613 * Remove an IP address entry from the unlabeled connection hash table. 585 * Remove an IP address entry from the unlabeled connection hash table.
614 * Returns zero on success, negative values on failure. The caller is 586 * Returns zero on success, negative values on failure.
615 * responsible for calling the rcu_read_[un]lock() functions.
616 * 587 *
617 */ 588 */
618static int netlbl_unlhsh_remove_addr6(struct net *net, 589static int netlbl_unlhsh_remove_addr6(struct net *net,
@@ -1547,8 +1518,10 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
1547 struct netlbl_unlhsh_iface *iface; 1518 struct netlbl_unlhsh_iface *iface;
1548 1519
1549 rcu_read_lock(); 1520 rcu_read_lock();
1550 iface = netlbl_unlhsh_search_iface_def(skb->skb_iif); 1521 iface = netlbl_unlhsh_search_iface(skb->skb_iif);
1551 if (iface == NULL) 1522 if (iface == NULL)
1523 iface = rcu_dereference(netlbl_unlhsh_def);
1524 if (iface == NULL || !iface->valid)
1552 goto unlabel_getattr_nolabel; 1525 goto unlabel_getattr_nolabel;
1553 switch (family) { 1526 switch (family) {
1554 case PF_INET: { 1527 case PF_INET: {
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c
index 68706b4e3bf8..a3fd75ac3fa5 100644
--- a/net/netlabel/netlabel_user.c
+++ b/net/netlabel/netlabel_user.c
@@ -35,6 +35,7 @@
35#include <linux/audit.h> 35#include <linux/audit.h>
36#include <linux/tty.h> 36#include <linux/tty.h>
37#include <linux/security.h> 37#include <linux/security.h>
38#include <linux/gfp.h>
38#include <net/sock.h> 39#include <net/sock.h>
39#include <net/netlink.h> 40#include <net/netlink.h>
40#include <net/genetlink.h> 41#include <net/genetlink.h>
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 320d0423a240..6464a1972a69 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -545,7 +545,7 @@ static int netlink_autobind(struct socket *sock)
545 struct hlist_head *head; 545 struct hlist_head *head;
546 struct sock *osk; 546 struct sock *osk;
547 struct hlist_node *node; 547 struct hlist_node *node;
548 s32 pid = current->tgid; 548 s32 pid = task_tgid_vnr(current);
549 int err; 549 int err;
550 static s32 rover = -4097; 550 static s32 rover = -4097;
551 551
@@ -683,6 +683,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
683 struct netlink_sock *nlk = nlk_sk(sk); 683 struct netlink_sock *nlk = nlk_sk(sk);
684 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; 684 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
685 685
686 if (alen < sizeof(addr->sa_family))
687 return -EINVAL;
688
686 if (addr->sa_family == AF_UNSPEC) { 689 if (addr->sa_family == AF_UNSPEC) {
687 sk->sk_state = NETLINK_UNCONNECTED; 690 sk->sk_state = NETLINK_UNCONNECTED;
688 nlk->dst_pid = 0; 691 nlk->dst_pid = 0;
@@ -1093,6 +1096,7 @@ static inline int do_one_set_err(struct sock *sk,
1093 struct netlink_set_err_data *p) 1096 struct netlink_set_err_data *p)
1094{ 1097{
1095 struct netlink_sock *nlk = nlk_sk(sk); 1098 struct netlink_sock *nlk = nlk_sk(sk);
1099 int ret = 0;
1096 1100
1097 if (sk == p->exclude_sk) 1101 if (sk == p->exclude_sk)
1098 goto out; 1102 goto out;
@@ -1104,10 +1108,15 @@ static inline int do_one_set_err(struct sock *sk,
1104 !test_bit(p->group - 1, nlk->groups)) 1108 !test_bit(p->group - 1, nlk->groups))
1105 goto out; 1109 goto out;
1106 1110
1111 if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
1112 ret = 1;
1113 goto out;
1114 }
1115
1107 sk->sk_err = p->code; 1116 sk->sk_err = p->code;
1108 sk->sk_error_report(sk); 1117 sk->sk_error_report(sk);
1109out: 1118out:
1110 return 0; 1119 return ret;
1111} 1120}
1112 1121
1113/** 1122/**
@@ -1116,12 +1125,16 @@ out:
1116 * @pid: the PID of a process that we want to skip (if any) 1125 * @pid: the PID of a process that we want to skip (if any)
1117 * @groups: the broadcast group that will notice the error 1126 * @groups: the broadcast group that will notice the error
1118 * @code: error code, must be negative (as usual in kernelspace) 1127 * @code: error code, must be negative (as usual in kernelspace)
1128 *
1129 * This function returns the number of broadcast listeners that have set the
1130 * NETLINK_RECV_NO_ENOBUFS socket option.
1119 */ 1131 */
1120void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) 1132int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
1121{ 1133{
1122 struct netlink_set_err_data info; 1134 struct netlink_set_err_data info;
1123 struct hlist_node *node; 1135 struct hlist_node *node;
1124 struct sock *sk; 1136 struct sock *sk;
1137 int ret = 0;
1125 1138
1126 info.exclude_sk = ssk; 1139 info.exclude_sk = ssk;
1127 info.pid = pid; 1140 info.pid = pid;
@@ -1132,9 +1145,10 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
1132 read_lock(&nl_table_lock); 1145 read_lock(&nl_table_lock);
1133 1146
1134 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) 1147 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
1135 do_one_set_err(sk, &info); 1148 ret += do_one_set_err(sk, &info);
1136 1149
1137 read_unlock(&nl_table_lock); 1150 read_unlock(&nl_table_lock);
1151 return ret;
1138} 1152}
1139EXPORT_SYMBOL(netlink_set_err); 1153EXPORT_SYMBOL(netlink_set_err);
1140 1154
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index a4b6e148c5de..aa4308afcc7f 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/slab.h>
11#include <linux/errno.h> 12#include <linux/errno.h>
12#include <linux/types.h> 13#include <linux/types.h>
13#include <linux/socket.h> 14#include <linux/socket.h>
@@ -20,15 +21,17 @@
20 21
21static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */ 22static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
22 23
23static inline void genl_lock(void) 24void genl_lock(void)
24{ 25{
25 mutex_lock(&genl_mutex); 26 mutex_lock(&genl_mutex);
26} 27}
28EXPORT_SYMBOL(genl_lock);
27 29
28static inline void genl_unlock(void) 30void genl_unlock(void)
29{ 31{
30 mutex_unlock(&genl_mutex); 32 mutex_unlock(&genl_mutex);
31} 33}
34EXPORT_SYMBOL(genl_unlock);
32 35
33#define GENL_FAM_TAB_SIZE 16 36#define GENL_FAM_TAB_SIZE 16
34#define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1) 37#define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1)
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index a249127020a5..fa07f044b599 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -15,6 +15,7 @@
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/socket.h> 16#include <linux/socket.h>
17#include <linux/in.h> 17#include <linux/in.h>
18#include <linux/slab.h>
18#include <linux/kernel.h> 19#include <linux/kernel.h>
19#include <linux/sched.h> 20#include <linux/sched.h>
20#include <linux/timer.h> 21#include <linux/timer.h>
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 7aa11b01b2e2..64e6dde9749d 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -19,6 +19,7 @@
19#include <linux/fcntl.h> 19#include <linux/fcntl.h>
20#include <linux/in.h> 20#include <linux/in.h>
21#include <linux/if_ether.h> /* For the statistics structure. */ 21#include <linux/if_ether.h> /* For the statistics structure. */
22#include <linux/slab.h>
22 23
23#include <asm/system.h> 24#include <asm/system.h>
24#include <asm/uaccess.h> 25#include <asm/uaccess.h>
diff --git a/net/netrom/nr_in.c b/net/netrom/nr_in.c
index 68176483617f..6d4ef6d65b3d 100644
--- a/net/netrom/nr_in.c
+++ b/net/netrom/nr_in.c
@@ -16,6 +16,7 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/sockios.h> 17#include <linux/sockios.h>
18#include <linux/net.h> 18#include <linux/net.h>
19#include <linux/slab.h>
19#include <net/ax25.h> 20#include <net/ax25.h>
20#include <linux/inet.h> 21#include <linux/inet.h>
21#include <linux/netdevice.h> 22#include <linux/netdevice.h>
diff --git a/net/netrom/nr_loopback.c b/net/netrom/nr_loopback.c
index f324d5df4186..94d4e922af53 100644
--- a/net/netrom/nr_loopback.c
+++ b/net/netrom/nr_loopback.c
@@ -7,6 +7,7 @@
7 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi) 7 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
8 */ 8 */
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/slab.h>
10#include <linux/socket.h> 11#include <linux/socket.h>
11#include <linux/timer.h> 12#include <linux/timer.h>
12#include <net/ax25.h> 13#include <net/ax25.h>
diff --git a/net/netrom/nr_out.c b/net/netrom/nr_out.c
index e3e6c44e1890..607fddb4fdbb 100644
--- a/net/netrom/nr_out.c
+++ b/net/netrom/nr_out.c
@@ -16,6 +16,7 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/sockios.h> 17#include <linux/sockios.h>
18#include <linux/net.h> 18#include <linux/net.h>
19#include <linux/slab.h>
19#include <net/ax25.h> 20#include <net/ax25.h>
20#include <linux/inet.h> 21#include <linux/inet.h>
21#include <linux/netdevice.h> 22#include <linux/netdevice.h>
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 5cc648012f50..44059d0c8dd1 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -17,6 +17,7 @@
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/sockios.h> 18#include <linux/sockios.h>
19#include <linux/net.h> 19#include <linux/net.h>
20#include <linux/slab.h>
20#include <net/ax25.h> 21#include <net/ax25.h>
21#include <linux/inet.h> 22#include <linux/inet.h>
22#include <linux/netdevice.h> 23#include <linux/netdevice.h>
diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c
index 04e7d0d2fd8f..6a947ae50dbd 100644
--- a/net/netrom/nr_subr.c
+++ b/net/netrom/nr_subr.c
@@ -15,6 +15,7 @@
15#include <linux/string.h> 15#include <linux/string.h>
16#include <linux/sockios.h> 16#include <linux/sockios.h>
17#include <linux/net.h> 17#include <linux/net.h>
18#include <linux/slab.h>
18#include <net/ax25.h> 19#include <net/ax25.h>
19#include <linux/inet.h> 20#include <linux/inet.h>
20#include <linux/netdevice.h> 21#include <linux/netdevice.h>
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 1612d417d10c..f162d59d8161 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -60,6 +60,7 @@
60#include <linux/wireless.h> 60#include <linux/wireless.h>
61#include <linux/kernel.h> 61#include <linux/kernel.h>
62#include <linux/kmod.h> 62#include <linux/kmod.h>
63#include <linux/slab.h>
63#include <net/net_namespace.h> 64#include <net/net_namespace.h>
64#include <net/ip.h> 65#include <net/ip.h>
65#include <net/protocol.h> 66#include <net/protocol.h>
@@ -81,6 +82,7 @@
81#include <linux/mutex.h> 82#include <linux/mutex.h>
82#include <linux/if_vlan.h> 83#include <linux/if_vlan.h>
83#include <linux/virtio_net.h> 84#include <linux/virtio_net.h>
85#include <linux/errqueue.h>
84 86
85#ifdef CONFIG_INET 87#ifdef CONFIG_INET
86#include <net/inet_common.h> 88#include <net/inet_common.h>
@@ -314,6 +316,8 @@ static inline struct packet_sock *pkt_sk(struct sock *sk)
314 316
315static void packet_sock_destruct(struct sock *sk) 317static void packet_sock_destruct(struct sock *sk)
316{ 318{
319 skb_queue_purge(&sk->sk_error_queue);
320
317 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); 321 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
318 WARN_ON(atomic_read(&sk->sk_wmem_alloc)); 322 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
319 323
@@ -482,6 +486,9 @@ retry:
482 skb->dev = dev; 486 skb->dev = dev;
483 skb->priority = sk->sk_priority; 487 skb->priority = sk->sk_priority;
484 skb->mark = sk->sk_mark; 488 skb->mark = sk->sk_mark;
489 err = sock_tx_timestamp(msg, sk, skb_tx(skb));
490 if (err < 0)
491 goto out_unlock;
485 492
486 dev_queue_xmit(skb); 493 dev_queue_xmit(skb);
487 rcu_read_unlock(); 494 rcu_read_unlock();
@@ -1187,6 +1194,9 @@ static int packet_snd(struct socket *sock,
1187 err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len); 1194 err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
1188 if (err) 1195 if (err)
1189 goto out_free; 1196 goto out_free;
1197 err = sock_tx_timestamp(msg, sk, skb_tx(skb));
1198 if (err < 0)
1199 goto out_free;
1190 1200
1191 skb->protocol = proto; 1201 skb->protocol = proto;
1192 skb->dev = dev; 1202 skb->dev = dev;
@@ -1486,6 +1496,51 @@ out:
1486 return err; 1496 return err;
1487} 1497}
1488 1498
1499static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
1500{
1501 struct sock_exterr_skb *serr;
1502 struct sk_buff *skb, *skb2;
1503 int copied, err;
1504
1505 err = -EAGAIN;
1506 skb = skb_dequeue(&sk->sk_error_queue);
1507 if (skb == NULL)
1508 goto out;
1509
1510 copied = skb->len;
1511 if (copied > len) {
1512 msg->msg_flags |= MSG_TRUNC;
1513 copied = len;
1514 }
1515 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1516 if (err)
1517 goto out_free_skb;
1518
1519 sock_recv_timestamp(msg, sk, skb);
1520
1521 serr = SKB_EXT_ERR(skb);
1522 put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
1523 sizeof(serr->ee), &serr->ee);
1524
1525 msg->msg_flags |= MSG_ERRQUEUE;
1526 err = copied;
1527
1528 /* Reset and regenerate socket error */
1529 spin_lock_bh(&sk->sk_error_queue.lock);
1530 sk->sk_err = 0;
1531 if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
1532 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
1533 spin_unlock_bh(&sk->sk_error_queue.lock);
1534 sk->sk_error_report(sk);
1535 } else
1536 spin_unlock_bh(&sk->sk_error_queue.lock);
1537
1538out_free_skb:
1539 kfree_skb(skb);
1540out:
1541 return err;
1542}
1543
1489/* 1544/*
1490 * Pull a packet from our receive queue and hand it to the user. 1545 * Pull a packet from our receive queue and hand it to the user.
1491 * If necessary we block. 1546 * If necessary we block.
@@ -1501,7 +1556,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1501 int vnet_hdr_len = 0; 1556 int vnet_hdr_len = 0;
1502 1557
1503 err = -EINVAL; 1558 err = -EINVAL;
1504 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT)) 1559 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
1505 goto out; 1560 goto out;
1506 1561
1507#if 0 1562#if 0
@@ -1510,6 +1565,11 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1510 return -ENODEV; 1565 return -ENODEV;
1511#endif 1566#endif
1512 1567
1568 if (flags & MSG_ERRQUEUE) {
1569 err = packet_recv_error(sk, msg, len);
1570 goto out;
1571 }
1572
1513 /* 1573 /*
1514 * Call the generic datagram receiver. This handles all sorts 1574 * Call the generic datagram receiver. This handles all sorts
1515 * of horrible races and re-entrancy so we can forget about it 1575 * of horrible races and re-entrancy so we can forget about it
@@ -1691,9 +1751,9 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
1691 if (i->alen != dev->addr_len) 1751 if (i->alen != dev->addr_len)
1692 return -EINVAL; 1752 return -EINVAL;
1693 if (what > 0) 1753 if (what > 0)
1694 return dev_mc_add(dev, i->addr, i->alen, 0); 1754 return dev_mc_add(dev, i->addr);
1695 else 1755 else
1696 return dev_mc_delete(dev, i->addr, i->alen, 0); 1756 return dev_mc_del(dev, i->addr);
1697 break; 1757 break;
1698 case PACKET_MR_PROMISC: 1758 case PACKET_MR_PROMISC:
1699 return dev_set_promiscuity(dev, what); 1759 return dev_set_promiscuity(dev, what);
@@ -1705,9 +1765,9 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
1705 if (i->alen != dev->addr_len) 1765 if (i->alen != dev->addr_len)
1706 return -EINVAL; 1766 return -EINVAL;
1707 if (what > 0) 1767 if (what > 0)
1708 return dev_unicast_add(dev, i->addr); 1768 return dev_uc_add(dev, i->addr);
1709 else 1769 else
1710 return dev_unicast_delete(dev, i->addr); 1770 return dev_uc_del(dev, i->addr);
1711 break; 1771 break;
1712 default: 1772 default:
1713 break; 1773 break;
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index 526d0273991a..73aee7f2fcdc 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -25,6 +25,7 @@
25 25
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/slab.h>
28#include <asm/unaligned.h> 29#include <asm/unaligned.h>
29#include <net/sock.h> 30#include <net/sock.h>
30 31
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index 387197b579b1..1bd38db4fe1e 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -24,6 +24,7 @@
24 */ 24 */
25 25
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/slab.h>
27#include <linux/socket.h> 28#include <linux/socket.h>
28#include <asm/ioctls.h> 29#include <asm/ioctls.h>
29#include <net/sock.h> 30#include <net/sock.h>
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 360cf377693e..e2a95762abd3 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/slab.h>
26#include <linux/socket.h> 27#include <linux/socket.h>
27#include <net/sock.h> 28#include <net/sock.h>
28#include <net/tcp_states.h> 29#include <net/tcp_states.h>
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 5c6ae0c701c0..9b4ced6e0968 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -25,6 +25,7 @@
25 25
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/net.h> 27#include <linux/net.h>
28#include <linux/slab.h>
28#include <linux/netdevice.h> 29#include <linux/netdevice.h>
29#include <linux/phonet.h> 30#include <linux/phonet.h>
30#include <linux/proc_fs.h> 31#include <linux/proc_fs.h>
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index fe2e7088ee07..58b3b1f991ed 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -26,6 +26,7 @@
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/netlink.h> 27#include <linux/netlink.h>
28#include <linux/phonet.h> 28#include <linux/phonet.h>
29#include <linux/slab.h>
29#include <net/sock.h> 30#include <net/sock.h>
30#include <net/phonet/pn_dev.h> 31#include <net/phonet/pn_dev.h>
31 32
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 69c8b826a0ce..c785bfd0744f 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -23,6 +23,7 @@
23 * 02110-1301 USA 23 * 02110-1301 USA
24 */ 24 */
25 25
26#include <linux/gfp.h>
26#include <linux/kernel.h> 27#include <linux/kernel.h>
27#include <linux/net.h> 28#include <linux/net.h>
28#include <linux/poll.h> 29#include <linux/poll.h>
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 937ecda4abe7..7919a9edb8e9 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -33,6 +33,7 @@
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/kernel.h> 35#include <linux/kernel.h>
36#include <linux/gfp.h>
36#include <linux/in.h> 37#include <linux/in.h>
37#include <linux/poll.h> 38#include <linux/poll.h>
38#include <net/sock.h> 39#include <net/sock.h>
@@ -451,7 +452,6 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
451 struct rds_info_lengths *lens) 452 struct rds_info_lengths *lens)
452{ 453{
453 struct rds_sock *rs; 454 struct rds_sock *rs;
454 struct sock *sk;
455 struct rds_incoming *inc; 455 struct rds_incoming *inc;
456 unsigned long flags; 456 unsigned long flags;
457 unsigned int total = 0; 457 unsigned int total = 0;
@@ -461,7 +461,6 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
461 spin_lock_irqsave(&rds_sock_lock, flags); 461 spin_lock_irqsave(&rds_sock_lock, flags);
462 462
463 list_for_each_entry(rs, &rds_sock_list, rs_item) { 463 list_for_each_entry(rs, &rds_sock_list, rs_item) {
464 sk = rds_rs_to_sk(rs);
465 read_lock(&rs->rs_recv_lock); 464 read_lock(&rs->rs_recv_lock);
466 465
467 /* XXX too lazy to maintain counts.. */ 466 /* XXX too lazy to maintain counts.. */
diff --git a/net/rds/cong.c b/net/rds/cong.c
index dd2711df640b..0871a29f0780 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -30,6 +30,7 @@
30 * SOFTWARE. 30 * SOFTWARE.
31 * 31 *
32 */ 32 */
33#include <linux/slab.h>
33#include <linux/types.h> 34#include <linux/types.h>
34#include <linux/rbtree.h> 35#include <linux/rbtree.h>
35 36
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 278f607ab603..7619b671ca28 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -32,6 +32,7 @@
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/list.h> 34#include <linux/list.h>
35#include <linux/slab.h>
35#include <net/inet_hashtables.h> 36#include <net/inet_hashtables.h>
36 37
37#include "rds.h" 38#include "rds.h"
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 3b8992361042..8f2d6dd7700a 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -37,6 +37,7 @@
37#include <linux/inetdevice.h> 37#include <linux/inetdevice.h>
38#include <linux/if_arp.h> 38#include <linux/if_arp.h>
39#include <linux/delay.h> 39#include <linux/delay.h>
40#include <linux/slab.h>
40 41
41#include "rds.h" 42#include "rds.h"
42#include "ib.h" 43#include "ib.h"
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index e1f124bf03bb..10ed0d55f759 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -32,6 +32,7 @@
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/in.h> 34#include <linux/in.h>
35#include <linux/slab.h>
35#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
36 37
37#include "rds.h" 38#include "rds.h"
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index cfb1d904ed00..a54cd63f9e35 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h>
34 35
35#include "rds.h" 36#include "rds.h"
36#include "rdma.h" 37#include "rdma.h"
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index c338881eca71..c74e9904a6b2 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h>
34#include <linux/pci.h> 35#include <linux/pci.h>
35#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
36#include <rdma/rdma_cm.h> 37#include <rdma/rdma_cm.h>
diff --git a/net/rds/info.c b/net/rds/info.c
index 814a91a6f4a7..c45c4173a44d 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -32,6 +32,7 @@
32 */ 32 */
33#include <linux/percpu.h> 33#include <linux/percpu.h>
34#include <linux/seq_file.h> 34#include <linux/seq_file.h>
35#include <linux/slab.h>
35#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
36 37
37#include "rds.h" 38#include "rds.h"
diff --git a/net/rds/iw.c b/net/rds/iw.c
index b28fa8525b24..c8f3d3525cb9 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -37,6 +37,7 @@
37#include <linux/inetdevice.h> 37#include <linux/inetdevice.h>
38#include <linux/if_arp.h> 38#include <linux/if_arp.h>
39#include <linux/delay.h> 39#include <linux/delay.h>
40#include <linux/slab.h>
40 41
41#include "rds.h" 42#include "rds.h"
42#include "iw.h" 43#include "iw.h"
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index 6bc638fd252c..a9d951b4fbae 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -32,6 +32,7 @@
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/in.h> 34#include <linux/in.h>
35#include <linux/slab.h>
35#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
36 37
37#include "rds.h" 38#include "rds.h"
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index 9eda11cca956..13dc1862d862 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h>
34 35
35#include "rds.h" 36#include "rds.h"
36#include "rdma.h" 37#include "rdma.h"
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index 337e4e5025e2..3d479067d54d 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h>
34#include <linux/pci.h> 35#include <linux/pci.h>
35#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
36#include <rdma/rdma_cm.h> 37#include <rdma/rdma_cm.h>
diff --git a/net/rds/loop.c b/net/rds/loop.c
index 93a45f1ce61f..dd9879379457 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h>
34#include <linux/in.h> 35#include <linux/in.h>
35 36
36#include "rds.h" 37#include "rds.h"
diff --git a/net/rds/message.c b/net/rds/message.c
index 73e600ffd87f..9a1d67e001ba 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h>
34 35
35#include "rds.h" 36#include "rds.h"
36#include "rdma.h" 37#include "rdma.h"
diff --git a/net/rds/page.c b/net/rds/page.c
index 36790122dfd4..595a952d4b17 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/highmem.h> 33#include <linux/highmem.h>
34#include <linux/gfp.h>
34 35
35#include "rds.h" 36#include "rds.h"
36 37
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 61b359d9dffd..75fd13bb631b 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/pagemap.h> 33#include <linux/pagemap.h>
34#include <linux/slab.h>
34#include <linux/rbtree.h> 35#include <linux/rbtree.h>
35#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */ 36#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */
36 37
diff --git a/net/rds/recv.c b/net/rds/recv.c
index b426d67f760c..e2a2b9344f7b 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h>
34#include <net/sock.h> 35#include <net/sock.h>
35#include <linux/in.h> 36#include <linux/in.h>
36 37
diff --git a/net/rds/send.c b/net/rds/send.c
index 4629a0b63bbd..53d6795ac9d0 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/gfp.h>
34#include <net/sock.h> 35#include <net/sock.h>
35#include <linux/in.h> 36#include <linux/in.h>
36#include <linux/list.h> 37#include <linux/list.h>
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index b5198aee45d3..babf4577ff7d 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h>
34#include <linux/in.h> 35#include <linux/in.h>
35#include <net/tcp.h> 36#include <net/tcp.h>
36 37
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 53cb1b54165d..975183fe6950 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/gfp.h>
34#include <linux/in.h> 35#include <linux/in.h>
35#include <net/tcp.h> 36#include <net/tcp.h>
36 37
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 40bfcf887465..1aba6878fa5d 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h>
34#include <net/tcp.h> 35#include <net/tcp.h>
35 36
36#include "rds.h" 37#include "rds.h"
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index c218e07e5caf..51875a0c5d48 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -33,6 +33,7 @@
33#include <linux/wait.h> 33#include <linux/wait.h>
34#include <linux/poll.h> 34#include <linux/poll.h>
35#include <linux/fs.h> 35#include <linux/fs.h>
36#include <linux/slab.h>
36 37
37#include "rfkill.h" 38#include "rfkill.h"
38 39
@@ -628,6 +629,49 @@ static ssize_t rfkill_persistent_show(struct device *dev,
628 return sprintf(buf, "%d\n", rfkill->persistent); 629 return sprintf(buf, "%d\n", rfkill->persistent);
629} 630}
630 631
632static ssize_t rfkill_hard_show(struct device *dev,
633 struct device_attribute *attr,
634 char *buf)
635{
636 struct rfkill *rfkill = to_rfkill(dev);
637
638 return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0 );
639}
640
641static ssize_t rfkill_soft_show(struct device *dev,
642 struct device_attribute *attr,
643 char *buf)
644{
645 struct rfkill *rfkill = to_rfkill(dev);
646
647 return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0 );
648}
649
650static ssize_t rfkill_soft_store(struct device *dev,
651 struct device_attribute *attr,
652 const char *buf, size_t count)
653{
654 struct rfkill *rfkill = to_rfkill(dev);
655 unsigned long state;
656 int err;
657
658 if (!capable(CAP_NET_ADMIN))
659 return -EPERM;
660
661 err = strict_strtoul(buf, 0, &state);
662 if (err)
663 return err;
664
665 if (state > 1 )
666 return -EINVAL;
667
668 mutex_lock(&rfkill_global_mutex);
669 rfkill_set_block(rfkill, state);
670 mutex_unlock(&rfkill_global_mutex);
671
672 return err ?: count;
673}
674
631static u8 user_state_from_blocked(unsigned long state) 675static u8 user_state_from_blocked(unsigned long state)
632{ 676{
633 if (state & RFKILL_BLOCK_HW) 677 if (state & RFKILL_BLOCK_HW)
@@ -643,14 +687,8 @@ static ssize_t rfkill_state_show(struct device *dev,
643 char *buf) 687 char *buf)
644{ 688{
645 struct rfkill *rfkill = to_rfkill(dev); 689 struct rfkill *rfkill = to_rfkill(dev);
646 unsigned long flags;
647 u32 state;
648
649 spin_lock_irqsave(&rfkill->lock, flags);
650 state = rfkill->state;
651 spin_unlock_irqrestore(&rfkill->lock, flags);
652 690
653 return sprintf(buf, "%d\n", user_state_from_blocked(state)); 691 return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
654} 692}
655 693
656static ssize_t rfkill_state_store(struct device *dev, 694static ssize_t rfkill_state_store(struct device *dev,
@@ -700,6 +738,8 @@ static struct device_attribute rfkill_dev_attrs[] = {
700 __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL), 738 __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
701 __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store), 739 __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
702 __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), 740 __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
741 __ATTR(soft, S_IRUGO|S_IWUSR, rfkill_soft_show, rfkill_soft_store),
742 __ATTR(hard, S_IRUGO, rfkill_hard_show, NULL),
703 __ATTR_NULL 743 __ATTR_NULL
704}; 744};
705 745
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index e90b9b6c16ae..4fb711a035f4 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -18,6 +18,7 @@
18#include <linux/types.h> 18#include <linux/types.h>
19#include <linux/socket.h> 19#include <linux/socket.h>
20#include <linux/in.h> 20#include <linux/in.h>
21#include <linux/slab.h>
21#include <linux/kernel.h> 22#include <linux/kernel.h>
22#include <linux/sched.h> 23#include <linux/sched.h>
23#include <linux/spinlock.h> 24#include <linux/spinlock.h>
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 424b893d1450..178ff4f73c85 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -19,6 +19,7 @@
19#include <linux/fcntl.h> 19#include <linux/fcntl.h>
20#include <linux/in.h> 20#include <linux/in.h>
21#include <linux/if_ether.h> 21#include <linux/if_ether.h>
22#include <linux/slab.h>
22 23
23#include <asm/system.h> 24#include <asm/system.h>
24#include <asm/io.h> 25#include <asm/io.h>
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
index 5ef5f6988a2e..a750a28e0221 100644
--- a/net/rose/rose_link.c
+++ b/net/rose/rose_link.c
@@ -16,6 +16,7 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/sockios.h> 17#include <linux/sockios.h>
18#include <linux/net.h> 18#include <linux/net.h>
19#include <linux/slab.h>
19#include <net/ax25.h> 20#include <net/ax25.h>
20#include <linux/inet.h> 21#include <linux/inet.h>
21#include <linux/netdevice.h> 22#include <linux/netdevice.h>
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index 968e8bac1b5d..ae4a9d99aec7 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -7,6 +7,7 @@
7 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) 7 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
8 */ 8 */
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/slab.h>
10#include <linux/socket.h> 11#include <linux/socket.h>
11#include <linux/timer.h> 12#include <linux/timer.h>
12#include <net/ax25.h> 13#include <net/ax25.h>
diff --git a/net/rose/rose_out.c b/net/rose/rose_out.c
index 69820f93414b..4ebf33afbe47 100644
--- a/net/rose/rose_out.c
+++ b/net/rose/rose_out.c
@@ -15,6 +15,7 @@
15#include <linux/string.h> 15#include <linux/string.h>
16#include <linux/sockios.h> 16#include <linux/sockios.h>
17#include <linux/net.h> 17#include <linux/net.h>
18#include <linux/gfp.h>
18#include <net/ax25.h> 19#include <net/ax25.h>
19#include <linux/inet.h> 20#include <linux/inet.h>
20#include <linux/netdevice.h> 21#include <linux/netdevice.h>
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 70a0b3b4b4d2..cbc244a128bd 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -16,6 +16,7 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/sockios.h> 17#include <linux/sockios.h>
18#include <linux/net.h> 18#include <linux/net.h>
19#include <linux/slab.h>
19#include <net/ax25.h> 20#include <net/ax25.h>
20#include <linux/inet.h> 21#include <linux/inet.h>
21#include <linux/netdevice.h> 22#include <linux/netdevice.h>
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
index b05108f382da..1734abba26a2 100644
--- a/net/rose/rose_subr.c
+++ b/net/rose/rose_subr.c
@@ -15,6 +15,7 @@
15#include <linux/string.h> 15#include <linux/string.h>
16#include <linux/sockios.h> 16#include <linux/sockios.h>
17#include <linux/net.h> 17#include <linux/net.h>
18#include <linux/slab.h>
18#include <net/ax25.h> 19#include <net/ax25.h>
19#include <linux/inet.h> 20#include <linux/inet.h>
20#include <linux/netdevice.h> 21#include <linux/netdevice.h>
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 287b1415cee9..c060095b27ce 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/net.h> 13#include <linux/net.h>
14#include <linux/slab.h>
14#include <linux/skbuff.h> 15#include <linux/skbuff.h>
15#include <linux/poll.h> 16#include <linux/poll.h>
16#include <linux/proc_fs.h> 17#include <linux/proc_fs.h>
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c
index 77228f28fa36..6d79310fcaae 100644
--- a/net/rxrpc/ar-accept.c
+++ b/net/rxrpc/ar-accept.c
@@ -17,6 +17,7 @@
17#include <linux/in.h> 17#include <linux/in.h>
18#include <linux/in6.h> 18#include <linux/in6.h>
19#include <linux/icmp.h> 19#include <linux/icmp.h>
20#include <linux/gfp.h>
20#include <net/sock.h> 21#include <net/sock.h>
21#include <net/af_rxrpc.h> 22#include <net/af_rxrpc.h>
22#include <net/ip.h> 23#include <net/ip.h>
@@ -88,6 +89,11 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
88 89
89 /* get a notification message to send to the server app */ 90 /* get a notification message to send to the server app */
90 notification = alloc_skb(0, GFP_NOFS); 91 notification = alloc_skb(0, GFP_NOFS);
92 if (!notification) {
93 _debug("no memory");
94 ret = -ENOMEM;
95 goto error_nofree;
96 }
91 rxrpc_new_skb(notification); 97 rxrpc_new_skb(notification);
92 notification->mark = RXRPC_SKB_MARK_NEW_CALL; 98 notification->mark = RXRPC_SKB_MARK_NEW_CALL;
93 99
@@ -189,6 +195,7 @@ invalid_service:
189 ret = -ECONNREFUSED; 195 ret = -ECONNREFUSED;
190error: 196error:
191 rxrpc_free_skb(notification); 197 rxrpc_free_skb(notification);
198error_nofree:
192 _leave(" = %d", ret); 199 _leave(" = %d", ret);
193 return ret; 200 return ret;
194} 201}
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index b4a220977031..2714da167fb8 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -13,6 +13,7 @@
13#include <linux/circ_buf.h> 13#include <linux/circ_buf.h>
14#include <linux/net.h> 14#include <linux/net.h>
15#include <linux/skbuff.h> 15#include <linux/skbuff.h>
16#include <linux/slab.h>
16#include <linux/udp.h> 17#include <linux/udp.h>
17#include <net/sock.h> 18#include <net/sock.h>
18#include <net/af_rxrpc.h> 19#include <net/af_rxrpc.h>
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
index bc0019f704fe..909d092de9f4 100644
--- a/net/rxrpc/ar-call.c
+++ b/net/rxrpc/ar-call.c
@@ -9,6 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#include <linux/slab.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/circ_buf.h> 14#include <linux/circ_buf.h>
14#include <net/sock.h> 15#include <net/sock.h>
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
index 9f1ce841a0bb..4106ca95ec86 100644
--- a/net/rxrpc/ar-connection.c
+++ b/net/rxrpc/ar-connection.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/slab.h>
13#include <linux/net.h> 14#include <linux/net.h>
14#include <linux/skbuff.h> 15#include <linux/skbuff.h>
15#include <linux/crypto.h> 16#include <linux/crypto.h>
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index f98c8027e5c1..89315009bab1 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -17,6 +17,7 @@
17#include <linux/in.h> 17#include <linux/in.h>
18#include <linux/in6.h> 18#include <linux/in6.h>
19#include <linux/icmp.h> 19#include <linux/icmp.h>
20#include <linux/gfp.h>
20#include <net/sock.h> 21#include <net/sock.h>
21#include <net/af_rxrpc.h> 22#include <net/af_rxrpc.h>
22#include <net/ip.h> 23#include <net/ip.h>
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 74697b200496..5ee16f0353fe 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -18,6 +18,7 @@
18#include <linux/key-type.h> 18#include <linux/key-type.h>
19#include <linux/crypto.h> 19#include <linux/crypto.h>
20#include <linux/ctype.h> 20#include <linux/ctype.h>
21#include <linux/slab.h>
21#include <net/sock.h> 22#include <net/sock.h>
22#include <net/af_rxrpc.h> 23#include <net/af_rxrpc.h>
23#include <keys/rxrpc-type.h> 24#include <keys/rxrpc-type.h>
diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
index 807535ff29b5..87f7135d238b 100644
--- a/net/rxrpc/ar-local.c
+++ b/net/rxrpc/ar-local.c
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/net.h> 13#include <linux/net.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/slab.h>
15#include <net/sock.h> 16#include <net/sock.h>
16#include <net/af_rxrpc.h> 17#include <net/af_rxrpc.h>
17#include "ar-internal.h" 18#include "ar-internal.h"
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index cc9102c5b588..5f22e263eda7 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/net.h> 12#include <linux/net.h>
13#include <linux/gfp.h>
13#include <linux/skbuff.h> 14#include <linux/skbuff.h>
14#include <linux/circ_buf.h> 15#include <linux/circ_buf.h>
15#include <net/sock.h> 16#include <net/sock.h>
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index edc026c1eb76..f0f85b0123f7 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -16,6 +16,7 @@
16#include <linux/in.h> 16#include <linux/in.h>
17#include <linux/in6.h> 17#include <linux/in6.h>
18#include <linux/icmp.h> 18#include <linux/icmp.h>
19#include <linux/slab.h>
19#include <net/sock.h> 20#include <net/sock.h>
20#include <net/af_rxrpc.h> 21#include <net/af_rxrpc.h>
21#include <net/ip.h> 22#include <net/ip.h>
diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
index 0936e1acc30e..5e0226fe587e 100644
--- a/net/rxrpc/ar-transport.c
+++ b/net/rxrpc/ar-transport.c
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/net.h> 13#include <linux/net.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/slab.h>
15#include <net/sock.h> 16#include <net/sock.h>
16#include <net/af_rxrpc.h> 17#include <net/af_rxrpc.h>
17#include "ar-internal.h" 18#include "ar-internal.h"
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 713ac593e2e9..7635107726ce 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -16,6 +16,7 @@
16#include <linux/crypto.h> 16#include <linux/crypto.h>
17#include <linux/scatterlist.h> 17#include <linux/scatterlist.h>
18#include <linux/ctype.h> 18#include <linux/ctype.h>
19#include <linux/slab.h>
19#include <net/sock.h> 20#include <net/sock.h>
20#include <net/af_rxrpc.h> 21#include <net/af_rxrpc.h>
21#include <keys/rxrpc-type.h> 22#include <keys/rxrpc-type.h>
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 21f9c7678aa3..2f691fb180d1 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -328,13 +328,16 @@ config NET_CLS_FLOW
328 module will be called cls_flow. 328 module will be called cls_flow.
329 329
330config NET_CLS_CGROUP 330config NET_CLS_CGROUP
331 bool "Control Group Classifier" 331 tristate "Control Group Classifier"
332 select NET_CLS 332 select NET_CLS
333 depends on CGROUPS 333 depends on CGROUPS
334 ---help--- 334 ---help---
335 Say Y here if you want to classify packets based on the control 335 Say Y here if you want to classify packets based on the control
336 cgroup of their process. 336 cgroup of their process.
337 337
338 To compile this code as a module, choose M here: the
339 module will be called cls_cgroup.
340
338config NET_EMATCH 341config NET_EMATCH
339 bool "Extended Matches" 342 bool "Extended Matches"
340 select NET_CLS 343 select NET_CLS
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 64f5e328cee9..019045174fc3 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -15,6 +15,7 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/slab.h>
18#include <linux/skbuff.h> 19#include <linux/skbuff.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <linux/kmod.h> 21#include <linux/kmod.h>
@@ -667,7 +668,8 @@ nlmsg_failure:
667} 668}
668 669
669static int 670static int
670act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event) 671act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n,
672 struct tc_action *a, int event)
671{ 673{
672 struct sk_buff *skb; 674 struct sk_buff *skb;
673 675
@@ -679,7 +681,7 @@ act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event)
679 return -EINVAL; 681 return -EINVAL;
680 } 682 }
681 683
682 return rtnl_unicast(skb, &init_net, pid); 684 return rtnl_unicast(skb, net, pid);
683} 685}
684 686
685static struct tc_action * 687static struct tc_action *
@@ -749,7 +751,8 @@ static struct tc_action *create_a(int i)
749 return act; 751 return act;
750} 752}
751 753
752static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid) 754static int tca_action_flush(struct net *net, struct nlattr *nla,
755 struct nlmsghdr *n, u32 pid)
753{ 756{
754 struct sk_buff *skb; 757 struct sk_buff *skb;
755 unsigned char *b; 758 unsigned char *b;
@@ -808,7 +811,7 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
808 nlh->nlmsg_flags |= NLM_F_ROOT; 811 nlh->nlmsg_flags |= NLM_F_ROOT;
809 module_put(a->ops->owner); 812 module_put(a->ops->owner);
810 kfree(a); 813 kfree(a);
811 err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); 814 err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
812 if (err > 0) 815 if (err > 0)
813 return 0; 816 return 0;
814 817
@@ -825,7 +828,8 @@ noflush_out:
825} 828}
826 829
827static int 830static int
828tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event) 831tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
832 u32 pid, int event)
829{ 833{
830 int i, ret; 834 int i, ret;
831 struct nlattr *tb[TCA_ACT_MAX_PRIO+1]; 835 struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
@@ -837,7 +841,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
837 841
838 if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) { 842 if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
839 if (tb[1] != NULL) 843 if (tb[1] != NULL)
840 return tca_action_flush(tb[1], n, pid); 844 return tca_action_flush(net, tb[1], n, pid);
841 else 845 else
842 return -EINVAL; 846 return -EINVAL;
843 } 847 }
@@ -858,7 +862,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
858 } 862 }
859 863
860 if (event == RTM_GETACTION) 864 if (event == RTM_GETACTION)
861 ret = act_get_notify(pid, n, head, event); 865 ret = act_get_notify(net, pid, n, head, event);
862 else { /* delete */ 866 else { /* delete */
863 struct sk_buff *skb; 867 struct sk_buff *skb;
864 868
@@ -877,7 +881,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
877 881
878 /* now do the delete */ 882 /* now do the delete */
879 tcf_action_destroy(head, 0); 883 tcf_action_destroy(head, 0);
880 ret = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, 884 ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
881 n->nlmsg_flags&NLM_F_ECHO); 885 n->nlmsg_flags&NLM_F_ECHO);
882 if (ret > 0) 886 if (ret > 0)
883 return 0; 887 return 0;
@@ -888,8 +892,8 @@ err:
888 return ret; 892 return ret;
889} 893}
890 894
891static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event, 895static int tcf_add_notify(struct net *net, struct tc_action *a,
892 u16 flags) 896 u32 pid, u32 seq, int event, u16 flags)
893{ 897{
894 struct tcamsg *t; 898 struct tcamsg *t;
895 struct nlmsghdr *nlh; 899 struct nlmsghdr *nlh;
@@ -922,7 +926,7 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
922 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 926 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
923 NETLINK_CB(skb).dst_group = RTNLGRP_TC; 927 NETLINK_CB(skb).dst_group = RTNLGRP_TC;
924 928
925 err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, flags&NLM_F_ECHO); 929 err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
926 if (err > 0) 930 if (err > 0)
927 err = 0; 931 err = 0;
928 return err; 932 return err;
@@ -935,7 +939,8 @@ nlmsg_failure:
935 939
936 940
937static int 941static int
938tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr) 942tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
943 u32 pid, int ovr)
939{ 944{
940 int ret = 0; 945 int ret = 0;
941 struct tc_action *act; 946 struct tc_action *act;
@@ -953,7 +958,7 @@ tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr)
953 /* dump then free all the actions after update; inserted policy 958 /* dump then free all the actions after update; inserted policy
954 * stays intact 959 * stays intact
955 * */ 960 * */
956 ret = tcf_add_notify(act, pid, seq, RTM_NEWACTION, n->nlmsg_flags); 961 ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
957 for (a = act; a; a = act) { 962 for (a = act; a; a = act) {
958 act = a->next; 963 act = a->next;
959 kfree(a); 964 kfree(a);
@@ -969,9 +974,6 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
969 u32 pid = skb ? NETLINK_CB(skb).pid : 0; 974 u32 pid = skb ? NETLINK_CB(skb).pid : 0;
970 int ret = 0, ovr = 0; 975 int ret = 0, ovr = 0;
971 976
972 if (!net_eq(net, &init_net))
973 return -EINVAL;
974
975 ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL); 977 ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
976 if (ret < 0) 978 if (ret < 0)
977 return ret; 979 return ret;
@@ -994,15 +996,17 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
994 if (n->nlmsg_flags&NLM_F_REPLACE) 996 if (n->nlmsg_flags&NLM_F_REPLACE)
995 ovr = 1; 997 ovr = 1;
996replay: 998replay:
997 ret = tcf_action_add(tca[TCA_ACT_TAB], n, pid, ovr); 999 ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr);
998 if (ret == -EAGAIN) 1000 if (ret == -EAGAIN)
999 goto replay; 1001 goto replay;
1000 break; 1002 break;
1001 case RTM_DELACTION: 1003 case RTM_DELACTION:
1002 ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_DELACTION); 1004 ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
1005 pid, RTM_DELACTION);
1003 break; 1006 break;
1004 case RTM_GETACTION: 1007 case RTM_GETACTION:
1005 ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_GETACTION); 1008 ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
1009 pid, RTM_GETACTION);
1006 break; 1010 break;
1007 default: 1011 default:
1008 BUG(); 1012 BUG();
@@ -1042,7 +1046,6 @@ find_dump_kind(const struct nlmsghdr *n)
1042static int 1046static int
1043tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) 1047tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1044{ 1048{
1045 struct net *net = sock_net(skb->sk);
1046 struct nlmsghdr *nlh; 1049 struct nlmsghdr *nlh;
1047 unsigned char *b = skb_tail_pointer(skb); 1050 unsigned char *b = skb_tail_pointer(skb);
1048 struct nlattr *nest; 1051 struct nlattr *nest;
@@ -1052,9 +1055,6 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1052 struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh); 1055 struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
1053 struct nlattr *kind = find_dump_kind(cb->nlh); 1056 struct nlattr *kind = find_dump_kind(cb->nlh);
1054 1057
1055 if (!net_eq(net, &init_net))
1056 return 0;
1057
1058 if (kind == NULL) { 1058 if (kind == NULL) {
1059 printk("tc_dump_action: action bad kind\n"); 1059 printk("tc_dump_action: action bad kind\n");
1060 return 0; 1060 return 0;
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index b9f79c251d75..03f80a0fa167 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -19,6 +19,7 @@
19#include <linux/rtnetlink.h> 19#include <linux/rtnetlink.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/slab.h>
22#include <net/netlink.h> 23#include <net/netlink.h>
23#include <net/pkt_sched.h> 24#include <net/pkt_sched.h>
24#include <linux/tc_act/tc_ipt.h> 25#include <linux/tc_act/tc_ipt.h>
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index d329170243cb..c046682054eb 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -20,6 +20,7 @@
20#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/gfp.h>
23#include <net/net_namespace.h> 24#include <net/net_namespace.h>
24#include <net/netlink.h> 25#include <net/netlink.h>
25#include <net/pkt_sched.h> 26#include <net/pkt_sched.h>
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 6b0359a500e6..b7dcfedc802e 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -17,6 +17,7 @@
17#include <linux/rtnetlink.h> 17#include <linux/rtnetlink.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/slab.h>
20#include <net/netlink.h> 21#include <net/netlink.h>
21#include <net/pkt_sched.h> 22#include <net/pkt_sched.h>
22#include <linux/tc_act/tc_pedit.h> 23#include <linux/tc_act/tc_pedit.h>
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 723964c3ee4f..654f73dff7c1 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -18,6 +18,7 @@
18#include <linux/skbuff.h> 18#include <linux/skbuff.h>
19#include <linux/rtnetlink.h> 19#include <linux/rtnetlink.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/slab.h>
21#include <net/act_api.h> 22#include <net/act_api.h>
22#include <net/netlink.h> 23#include <net/netlink.h>
23 24
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 8daa1ebc7413..622ca809c15c 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/slab.h>
14#include <linux/init.h> 15#include <linux/init.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
16#include <linux/skbuff.h> 17#include <linux/skbuff.h>
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 3725d8fa29db..5fd0c28ef79a 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -24,6 +24,7 @@
24#include <linux/kmod.h> 24#include <linux/kmod.h>
25#include <linux/netlink.h> 25#include <linux/netlink.h>
26#include <linux/err.h> 26#include <linux/err.h>
27#include <linux/slab.h>
27#include <net/net_namespace.h> 28#include <net/net_namespace.h>
28#include <net/sock.h> 29#include <net/sock.h>
29#include <net/netlink.h> 30#include <net/netlink.h>
@@ -98,8 +99,9 @@ out:
98} 99}
99EXPORT_SYMBOL(unregister_tcf_proto_ops); 100EXPORT_SYMBOL(unregister_tcf_proto_ops);
100 101
101static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n, 102static int tfilter_notify(struct net *net, struct sk_buff *oskb,
102 struct tcf_proto *tp, unsigned long fh, int event); 103 struct nlmsghdr *n, struct tcf_proto *tp,
104 unsigned long fh, int event);
103 105
104 106
105/* Select new prio value from the range, managed by kernel. */ 107/* Select new prio value from the range, managed by kernel. */
@@ -137,9 +139,6 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
137 int err; 139 int err;
138 int tp_created = 0; 140 int tp_created = 0;
139 141
140 if (!net_eq(net, &init_net))
141 return -EINVAL;
142
143replay: 142replay:
144 t = NLMSG_DATA(n); 143 t = NLMSG_DATA(n);
145 protocol = TC_H_MIN(t->tcm_info); 144 protocol = TC_H_MIN(t->tcm_info);
@@ -158,7 +157,7 @@ replay:
158 /* Find head of filter chain. */ 157 /* Find head of filter chain. */
159 158
160 /* Find link */ 159 /* Find link */
161 dev = __dev_get_by_index(&init_net, t->tcm_ifindex); 160 dev = __dev_get_by_index(net, t->tcm_ifindex);
162 if (dev == NULL) 161 if (dev == NULL)
163 return -ENODEV; 162 return -ENODEV;
164 163
@@ -282,7 +281,7 @@ replay:
282 *back = tp->next; 281 *back = tp->next;
283 spin_unlock_bh(root_lock); 282 spin_unlock_bh(root_lock);
284 283
285 tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER); 284 tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
286 tcf_destroy(tp); 285 tcf_destroy(tp);
287 err = 0; 286 err = 0;
288 goto errout; 287 goto errout;
@@ -305,10 +304,10 @@ replay:
305 case RTM_DELTFILTER: 304 case RTM_DELTFILTER:
306 err = tp->ops->delete(tp, fh); 305 err = tp->ops->delete(tp, fh);
307 if (err == 0) 306 if (err == 0)
308 tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER); 307 tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
309 goto errout; 308 goto errout;
310 case RTM_GETTFILTER: 309 case RTM_GETTFILTER:
311 err = tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER); 310 err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
312 goto errout; 311 goto errout;
313 default: 312 default:
314 err = -EINVAL; 313 err = -EINVAL;
@@ -324,7 +323,7 @@ replay:
324 *back = tp; 323 *back = tp;
325 spin_unlock_bh(root_lock); 324 spin_unlock_bh(root_lock);
326 } 325 }
327 tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER); 326 tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
328 } else { 327 } else {
329 if (tp_created) 328 if (tp_created)
330 tcf_destroy(tp); 329 tcf_destroy(tp);
@@ -370,8 +369,9 @@ nla_put_failure:
370 return -1; 369 return -1;
371} 370}
372 371
373static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n, 372static int tfilter_notify(struct net *net, struct sk_buff *oskb,
374 struct tcf_proto *tp, unsigned long fh, int event) 373 struct nlmsghdr *n, struct tcf_proto *tp,
374 unsigned long fh, int event)
375{ 375{
376 struct sk_buff *skb; 376 struct sk_buff *skb;
377 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; 377 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
@@ -385,7 +385,7 @@ static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n,
385 return -EINVAL; 385 return -EINVAL;
386 } 386 }
387 387
388 return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, 388 return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
389 n->nlmsg_flags & NLM_F_ECHO); 389 n->nlmsg_flags & NLM_F_ECHO);
390} 390}
391 391
@@ -418,12 +418,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
418 const struct Qdisc_class_ops *cops; 418 const struct Qdisc_class_ops *cops;
419 struct tcf_dump_args arg; 419 struct tcf_dump_args arg;
420 420
421 if (!net_eq(net, &init_net))
422 return 0;
423
424 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) 421 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
425 return skb->len; 422 return skb->len;
426 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) 423 if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
427 return skb->len; 424 return skb->len;
428 425
429 if (!tcm->tcm_parent) 426 if (!tcm->tcm_parent)
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 4e2bda854119..efd4f95fd050 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/slab.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/string.h> 16#include <linux/string.h>
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index e4877ca6727c..221180384fd7 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/slab.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/string.h> 15#include <linux/string.h>
15#include <linux/errno.h> 16#include <linux/errno.h>
@@ -24,6 +25,25 @@ struct cgroup_cls_state
24 u32 classid; 25 u32 classid;
25}; 26};
26 27
28static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
29 struct cgroup *cgrp);
30static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
31static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);
32
33struct cgroup_subsys net_cls_subsys = {
34 .name = "net_cls",
35 .create = cgrp_create,
36 .destroy = cgrp_destroy,
37 .populate = cgrp_populate,
38#ifdef CONFIG_NET_CLS_CGROUP
39 .subsys_id = net_cls_subsys_id,
40#else
41#define net_cls_subsys_id net_cls_subsys.subsys_id
42#endif
43 .module = THIS_MODULE,
44};
45
46
27static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp) 47static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
28{ 48{
29 return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id), 49 return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
@@ -79,14 +99,6 @@ static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
79 return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); 99 return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
80} 100}
81 101
82struct cgroup_subsys net_cls_subsys = {
83 .name = "net_cls",
84 .create = cgrp_create,
85 .destroy = cgrp_destroy,
86 .populate = cgrp_populate,
87 .subsys_id = net_cls_subsys_id,
88};
89
90struct cls_cgroup_head 102struct cls_cgroup_head
91{ 103{
92 u32 handle; 104 u32 handle;
@@ -277,12 +289,19 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
277 289
278static int __init init_cgroup_cls(void) 290static int __init init_cgroup_cls(void)
279{ 291{
280 return register_tcf_proto_ops(&cls_cgroup_ops); 292 int ret = register_tcf_proto_ops(&cls_cgroup_ops);
293 if (ret)
294 return ret;
295 ret = cgroup_load_subsys(&net_cls_subsys);
296 if (ret)
297 unregister_tcf_proto_ops(&cls_cgroup_ops);
298 return ret;
281} 299}
282 300
283static void __exit exit_cgroup_cls(void) 301static void __exit exit_cgroup_cls(void)
284{ 302{
285 unregister_tcf_proto_ops(&cls_cgroup_ops); 303 unregister_tcf_proto_ops(&cls_cgroup_ops);
304 cgroup_unload_subsys(&net_cls_subsys);
286} 305}
287 306
288module_init(init_cgroup_cls); 307module_init(init_cgroup_cls);
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index e054c62857e1..6ed61b10e002 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -20,6 +20,7 @@
20#include <linux/ip.h> 20#include <linux/ip.h>
21#include <linux/ipv6.h> 21#include <linux/ipv6.h>
22#include <linux/if_vlan.h> 22#include <linux/if_vlan.h>
23#include <linux/slab.h>
23 24
24#include <net/pkt_cls.h> 25#include <net/pkt_cls.h>
25#include <net/ip.h> 26#include <net/ip.h>
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 6d6e87585fb1..93b0a7b6f9b4 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -19,6 +19,7 @@
19 */ 19 */
20 20
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/slab.h>
22#include <linux/types.h> 23#include <linux/types.h>
23#include <linux/kernel.h> 24#include <linux/kernel.h>
24#include <linux/string.h> 25#include <linux/string.h>
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index dd872d5383ef..694dcd85dec8 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/slab.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/string.h> 16#include <linux/string.h>
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index e806f2314b5e..20ef330bb918 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -9,6 +9,7 @@
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/skbuff.h> 10#include <linux/skbuff.h>
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/slab.h>
12#include <net/act_api.h> 13#include <net/act_api.h>
13#include <net/netlink.h> 14#include <net/netlink.h>
14#include <net/pkt_cls.h> 15#include <net/pkt_cls.h>
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 07372f60bee3..593eac056e8d 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -31,6 +31,7 @@
31 */ 31 */
32 32
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/slab.h>
34#include <linux/types.h> 35#include <linux/types.h>
35#include <linux/kernel.h> 36#include <linux/kernel.h>
36#include <linux/string.h> 37#include <linux/string.h>
@@ -772,10 +773,10 @@ static int __init init_u32(void)
772 printk(" Performance counters on\n"); 773 printk(" Performance counters on\n");
773#endif 774#endif
774#ifdef CONFIG_NET_CLS_IND 775#ifdef CONFIG_NET_CLS_IND
775 printk(" input device check on \n"); 776 printk(" input device check on\n");
776#endif 777#endif
777#ifdef CONFIG_NET_CLS_ACT 778#ifdef CONFIG_NET_CLS_ACT
778 printk(" Actions configured \n"); 779 printk(" Actions configured\n");
779#endif 780#endif
780 return register_tcf_proto_ops(&cls_u32_ops); 781 return register_tcf_proto_ops(&cls_u32_ops);
781} 782}
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 24dce8b648a4..3bcac8aa333c 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -58,6 +58,7 @@
58 * only available if that subsystem is enabled in the kernel. 58 * only available if that subsystem is enabled in the kernel.
59 */ 59 */
60 60
61#include <linux/slab.h>
61#include <linux/module.h> 62#include <linux/module.h>
62#include <linux/types.h> 63#include <linux/types.h>
63#include <linux/kernel.h> 64#include <linux/kernel.h>
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index 370a1b2ea317..1a4176aee6e5 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -9,6 +9,7 @@
9 * Authors: Thomas Graf <tgraf@suug.ch> 9 * Authors: Thomas Graf <tgraf@suug.ch>
10 */ 10 */
11 11
12#include <linux/gfp.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index 853c5ead87fd..763253257411 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -9,6 +9,7 @@
9 * Authors: Thomas Graf <tgraf@suug.ch> 9 * Authors: Thomas Graf <tgraf@suug.ch>
10 */ 10 */
11 11
12#include <linux/slab.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index aab59409728b..e782bdeedc58 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -82,6 +82,7 @@
82 */ 82 */
83 83
84#include <linux/module.h> 84#include <linux/module.h>
85#include <linux/slab.h>
85#include <linux/types.h> 86#include <linux/types.h>
86#include <linux/kernel.h> 87#include <linux/kernel.h>
87#include <linux/errno.h> 88#include <linux/errno.h>
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 6cd491013b50..9839b26674f4 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -28,16 +28,19 @@
28#include <linux/list.h> 28#include <linux/list.h>
29#include <linux/hrtimer.h> 29#include <linux/hrtimer.h>
30#include <linux/lockdep.h> 30#include <linux/lockdep.h>
31#include <linux/slab.h>
31 32
32#include <net/net_namespace.h> 33#include <net/net_namespace.h>
33#include <net/sock.h> 34#include <net/sock.h>
34#include <net/netlink.h> 35#include <net/netlink.h>
35#include <net/pkt_sched.h> 36#include <net/pkt_sched.h>
36 37
37static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid, 38static int qdisc_notify(struct net *net, struct sk_buff *oskb,
39 struct nlmsghdr *n, u32 clid,
38 struct Qdisc *old, struct Qdisc *new); 40 struct Qdisc *old, struct Qdisc *new);
39static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n, 41static int tclass_notify(struct net *net, struct sk_buff *oskb,
40 struct Qdisc *q, unsigned long cl, int event); 42 struct nlmsghdr *n, struct Qdisc *q,
43 unsigned long cl, int event);
41 44
42/* 45/*
43 46
@@ -638,11 +641,12 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
638} 641}
639EXPORT_SYMBOL(qdisc_tree_decrease_qlen); 642EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
640 643
641static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid, 644static void notify_and_destroy(struct net *net, struct sk_buff *skb,
645 struct nlmsghdr *n, u32 clid,
642 struct Qdisc *old, struct Qdisc *new) 646 struct Qdisc *old, struct Qdisc *new)
643{ 647{
644 if (new || old) 648 if (new || old)
645 qdisc_notify(skb, n, clid, old, new); 649 qdisc_notify(net, skb, n, clid, old, new);
646 650
647 if (old) 651 if (old)
648 qdisc_destroy(old); 652 qdisc_destroy(old);
@@ -662,6 +666,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
662 struct Qdisc *new, struct Qdisc *old) 666 struct Qdisc *new, struct Qdisc *old)
663{ 667{
664 struct Qdisc *q = old; 668 struct Qdisc *q = old;
669 struct net *net = dev_net(dev);
665 int err = 0; 670 int err = 0;
666 671
667 if (parent == NULL) { 672 if (parent == NULL) {
@@ -698,12 +703,13 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
698 } 703 }
699 704
700 if (!ingress) { 705 if (!ingress) {
701 notify_and_destroy(skb, n, classid, dev->qdisc, new); 706 notify_and_destroy(net, skb, n, classid,
707 dev->qdisc, new);
702 if (new && !new->ops->attach) 708 if (new && !new->ops->attach)
703 atomic_inc(&new->refcnt); 709 atomic_inc(&new->refcnt);
704 dev->qdisc = new ? : &noop_qdisc; 710 dev->qdisc = new ? : &noop_qdisc;
705 } else { 711 } else {
706 notify_and_destroy(skb, n, classid, old, new); 712 notify_and_destroy(net, skb, n, classid, old, new);
707 } 713 }
708 714
709 if (dev->flags & IFF_UP) 715 if (dev->flags & IFF_UP)
@@ -721,7 +727,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
721 err = -ENOENT; 727 err = -ENOENT;
722 } 728 }
723 if (!err) 729 if (!err)
724 notify_and_destroy(skb, n, classid, old, new); 730 notify_and_destroy(net, skb, n, classid, old, new);
725 } 731 }
726 return err; 732 return err;
727} 733}
@@ -947,10 +953,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
947 struct Qdisc *p = NULL; 953 struct Qdisc *p = NULL;
948 int err; 954 int err;
949 955
950 if (!net_eq(net, &init_net)) 956 if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
951 return -EINVAL;
952
953 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
954 return -ENODEV; 957 return -ENODEV;
955 958
956 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); 959 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -990,7 +993,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
990 if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0) 993 if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
991 return err; 994 return err;
992 } else { 995 } else {
993 qdisc_notify(skb, n, clid, NULL, q); 996 qdisc_notify(net, skb, n, clid, NULL, q);
994 } 997 }
995 return 0; 998 return 0;
996} 999}
@@ -1009,16 +1012,13 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1009 struct Qdisc *q, *p; 1012 struct Qdisc *q, *p;
1010 int err; 1013 int err;
1011 1014
1012 if (!net_eq(net, &init_net))
1013 return -EINVAL;
1014
1015replay: 1015replay:
1016 /* Reinit, just in case something touches this. */ 1016 /* Reinit, just in case something touches this. */
1017 tcm = NLMSG_DATA(n); 1017 tcm = NLMSG_DATA(n);
1018 clid = tcm->tcm_parent; 1018 clid = tcm->tcm_parent;
1019 q = p = NULL; 1019 q = p = NULL;
1020 1020
1021 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) 1021 if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
1022 return -ENODEV; 1022 return -ENODEV;
1023 1023
1024 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); 1024 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1105,7 +1105,7 @@ replay:
1105 return -EINVAL; 1105 return -EINVAL;
1106 err = qdisc_change(q, tca); 1106 err = qdisc_change(q, tca);
1107 if (err == 0) 1107 if (err == 0)
1108 qdisc_notify(skb, n, clid, NULL, q); 1108 qdisc_notify(net, skb, n, clid, NULL, q);
1109 return err; 1109 return err;
1110 1110
1111create_n_graft: 1111create_n_graft:
@@ -1195,8 +1195,9 @@ nla_put_failure:
1195 return -1; 1195 return -1;
1196} 1196}
1197 1197
1198static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, 1198static int qdisc_notify(struct net *net, struct sk_buff *oskb,
1199 u32 clid, struct Qdisc *old, struct Qdisc *new) 1199 struct nlmsghdr *n, u32 clid,
1200 struct Qdisc *old, struct Qdisc *new)
1200{ 1201{
1201 struct sk_buff *skb; 1202 struct sk_buff *skb;
1202 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; 1203 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
@@ -1215,7 +1216,7 @@ static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
1215 } 1216 }
1216 1217
1217 if (skb->len) 1218 if (skb->len)
1218 return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); 1219 return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
1219 1220
1220err_out: 1221err_out:
1221 kfree_skb(skb); 1222 kfree_skb(skb);
@@ -1274,15 +1275,12 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1274 int s_idx, s_q_idx; 1275 int s_idx, s_q_idx;
1275 struct net_device *dev; 1276 struct net_device *dev;
1276 1277
1277 if (!net_eq(net, &init_net))
1278 return 0;
1279
1280 s_idx = cb->args[0]; 1278 s_idx = cb->args[0];
1281 s_q_idx = q_idx = cb->args[1]; 1279 s_q_idx = q_idx = cb->args[1];
1282 1280
1283 rcu_read_lock(); 1281 rcu_read_lock();
1284 idx = 0; 1282 idx = 0;
1285 for_each_netdev_rcu(&init_net, dev) { 1283 for_each_netdev_rcu(net, dev) {
1286 struct netdev_queue *dev_queue; 1284 struct netdev_queue *dev_queue;
1287 1285
1288 if (idx < s_idx) 1286 if (idx < s_idx)
@@ -1334,10 +1332,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1334 u32 qid = TC_H_MAJ(clid); 1332 u32 qid = TC_H_MAJ(clid);
1335 int err; 1333 int err;
1336 1334
1337 if (!net_eq(net, &init_net)) 1335 if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
1338 return -EINVAL;
1339
1340 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
1341 return -ENODEV; 1336 return -ENODEV;
1342 1337
1343 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); 1338 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1418,10 +1413,10 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1418 if (cops->delete) 1413 if (cops->delete)
1419 err = cops->delete(q, cl); 1414 err = cops->delete(q, cl);
1420 if (err == 0) 1415 if (err == 0)
1421 tclass_notify(skb, n, q, cl, RTM_DELTCLASS); 1416 tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
1422 goto out; 1417 goto out;
1423 case RTM_GETTCLASS: 1418 case RTM_GETTCLASS:
1424 err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS); 1419 err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
1425 goto out; 1420 goto out;
1426 default: 1421 default:
1427 err = -EINVAL; 1422 err = -EINVAL;
@@ -1434,7 +1429,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1434 if (cops->change) 1429 if (cops->change)
1435 err = cops->change(q, clid, pid, tca, &new_cl); 1430 err = cops->change(q, clid, pid, tca, &new_cl);
1436 if (err == 0) 1431 if (err == 0)
1437 tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS); 1432 tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
1438 1433
1439out: 1434out:
1440 if (cl) 1435 if (cl)
@@ -1486,8 +1481,9 @@ nla_put_failure:
1486 return -1; 1481 return -1;
1487} 1482}
1488 1483
1489static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n, 1484static int tclass_notify(struct net *net, struct sk_buff *oskb,
1490 struct Qdisc *q, unsigned long cl, int event) 1485 struct nlmsghdr *n, struct Qdisc *q,
1486 unsigned long cl, int event)
1491{ 1487{
1492 struct sk_buff *skb; 1488 struct sk_buff *skb;
1493 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; 1489 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
@@ -1501,7 +1497,7 @@ static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
1501 return -EINVAL; 1497 return -EINVAL;
1502 } 1498 }
1503 1499
1504 return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); 1500 return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
1505} 1501}
1506 1502
1507struct qdisc_dump_args 1503struct qdisc_dump_args
@@ -1576,12 +1572,9 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1576 struct net_device *dev; 1572 struct net_device *dev;
1577 int t, s_t; 1573 int t, s_t;
1578 1574
1579 if (!net_eq(net, &init_net))
1580 return 0;
1581
1582 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) 1575 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
1583 return 0; 1576 return 0;
1584 if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) 1577 if ((dev = dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
1585 return 0; 1578 return 0;
1586 1579
1587 s_t = cb->args[0]; 1580 s_t = cb->args[0];
@@ -1691,7 +1684,7 @@ static int psched_show(struct seq_file *seq, void *v)
1691 1684
1692static int psched_open(struct inode *inode, struct file *file) 1685static int psched_open(struct inode *inode, struct file *file)
1693{ 1686{
1694 return single_open(file, psched_show, PDE(inode)->data); 1687 return single_open(file, psched_show, NULL);
1695} 1688}
1696 1689
1697static const struct file_operations psched_fops = { 1690static const struct file_operations psched_fops = {
@@ -1701,15 +1694,53 @@ static const struct file_operations psched_fops = {
1701 .llseek = seq_lseek, 1694 .llseek = seq_lseek,
1702 .release = single_release, 1695 .release = single_release,
1703}; 1696};
1697
1698static int __net_init psched_net_init(struct net *net)
1699{
1700 struct proc_dir_entry *e;
1701
1702 e = proc_net_fops_create(net, "psched", 0, &psched_fops);
1703 if (e == NULL)
1704 return -ENOMEM;
1705
1706 return 0;
1707}
1708
1709static void __net_exit psched_net_exit(struct net *net)
1710{
1711 proc_net_remove(net, "psched");
1712}
1713#else
1714static int __net_init psched_net_init(struct net *net)
1715{
1716 return 0;
1717}
1718
1719static void __net_exit psched_net_exit(struct net *net)
1720{
1721}
1704#endif 1722#endif
1705 1723
1724static struct pernet_operations psched_net_ops = {
1725 .init = psched_net_init,
1726 .exit = psched_net_exit,
1727};
1728
1706static int __init pktsched_init(void) 1729static int __init pktsched_init(void)
1707{ 1730{
1731 int err;
1732
1733 err = register_pernet_subsys(&psched_net_ops);
1734 if (err) {
1735 printk(KERN_ERR "pktsched_init: "
1736 "cannot initialize per netns operations\n");
1737 return err;
1738 }
1739
1708 register_qdisc(&pfifo_qdisc_ops); 1740 register_qdisc(&pfifo_qdisc_ops);
1709 register_qdisc(&bfifo_qdisc_ops); 1741 register_qdisc(&bfifo_qdisc_ops);
1710 register_qdisc(&pfifo_head_drop_qdisc_ops); 1742 register_qdisc(&pfifo_head_drop_qdisc_ops);
1711 register_qdisc(&mq_qdisc_ops); 1743 register_qdisc(&mq_qdisc_ops);
1712 proc_net_fops_create(&init_net, "psched", 0, &psched_fops);
1713 1744
1714 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); 1745 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
1715 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL); 1746 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index ab82f145f689..fcbb86a486a2 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -3,6 +3,7 @@
3/* Written 1998-2000 by Werner Almesberger, EPFL ICA */ 3/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
4 4
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/slab.h>
6#include <linux/init.h> 7#include <linux/init.h>
7#include <linux/string.h> 8#include <linux/string.h>
8#include <linux/errno.h> 9#include <linux/errno.h>
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 3846d65bc03e..28c01ef5abc8 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/slab.h>
14#include <linux/types.h> 15#include <linux/types.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
16#include <linux/string.h> 17#include <linux/string.h>
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index a65604f8f2b8..b74046a95397 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/slab.h>
12#include <linux/init.h> 13#include <linux/init.h>
13#include <linux/errno.h> 14#include <linux/errno.h>
14#include <linux/netdevice.h> 15#include <linux/netdevice.h>
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index d303daa45d49..63d41f86679c 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -5,6 +5,7 @@
5 5
6#include <linux/module.h> 6#include <linux/module.h>
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/slab.h>
8#include <linux/types.h> 9#include <linux/types.h>
9#include <linux/string.h> 10#include <linux/string.h>
10#include <linux/errno.h> 11#include <linux/errno.h>
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 4b0a6cc44c77..5948bafa8ce2 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/slab.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/errno.h> 16#include <linux/errno.h>
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5173c1e1b19c..aeddabfb8e4e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -24,6 +24,7 @@
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/rcupdate.h> 25#include <linux/rcupdate.h>
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/slab.h>
27#include <net/pkt_sched.h> 28#include <net/pkt_sched.h>
28 29
29/* Main transmission queue. */ 30/* Main transmission queue. */
@@ -528,7 +529,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
528 unsigned int size; 529 unsigned int size;
529 int err = -ENOBUFS; 530 int err = -ENOBUFS;
530 531
531 /* ensure that the Qdisc and the private data are 32-byte aligned */ 532 /* ensure that the Qdisc and the private data are 64-byte aligned */
532 size = QDISC_ALIGN(sizeof(*sch)); 533 size = QDISC_ALIGN(sizeof(*sch));
533 size += ops->priv_size + (QDISC_ALIGNTO - 1); 534 size += ops->priv_size + (QDISC_ALIGNTO - 1);
534 535
@@ -590,6 +591,13 @@ void qdisc_reset(struct Qdisc *qdisc)
590} 591}
591EXPORT_SYMBOL(qdisc_reset); 592EXPORT_SYMBOL(qdisc_reset);
592 593
594static void qdisc_rcu_free(struct rcu_head *head)
595{
596 struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);
597
598 kfree((char *) qdisc - qdisc->padded);
599}
600
593void qdisc_destroy(struct Qdisc *qdisc) 601void qdisc_destroy(struct Qdisc *qdisc)
594{ 602{
595 const struct Qdisc_ops *ops = qdisc->ops; 603 const struct Qdisc_ops *ops = qdisc->ops;
@@ -613,7 +621,11 @@ void qdisc_destroy(struct Qdisc *qdisc)
613 dev_put(qdisc_dev(qdisc)); 621 dev_put(qdisc_dev(qdisc));
614 622
615 kfree_skb(qdisc->gso_skb); 623 kfree_skb(qdisc->gso_skb);
616 kfree((char *) qdisc - qdisc->padded); 624 /*
625 * gen_estimator est_timer() might access qdisc->q.lock,
626 * wait a RCU grace period before freeing qdisc.
627 */
628 call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
617} 629}
618EXPORT_SYMBOL(qdisc_destroy); 630EXPORT_SYMBOL(qdisc_destroy);
619 631
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 40408d595c08..51dcc2aa5c92 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -18,6 +18,7 @@
18 * For all the glorious comments look at include/net/red.h 18 * For all the glorious comments look at include/net/red.h
19 */ 19 */
20 20
21#include <linux/slab.h>
21#include <linux/module.h> 22#include <linux/module.h>
22#include <linux/types.h> 23#include <linux/types.h>
23#include <linux/kernel.h> 24#include <linux/kernel.h>
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 508cf5f3a6d5..0b52b8de562c 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -36,6 +36,7 @@
36#include <linux/compiler.h> 36#include <linux/compiler.h>
37#include <linux/rbtree.h> 37#include <linux/rbtree.h>
38#include <linux/workqueue.h> 38#include <linux/workqueue.h>
39#include <linux/slab.h>
39#include <net/netlink.h> 40#include <net/netlink.h>
40#include <net/pkt_sched.h> 41#include <net/pkt_sched.h>
41 42
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index d1dea3d5dc92..b2aba3f5e6fa 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/slab.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
13#include <linux/string.h> 14#include <linux/string.h>
14#include <linux/errno.h> 15#include <linux/errno.h>
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 7db2c88ce585..c50876cd8704 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/slab.h>
21#include <linux/types.h> 22#include <linux/types.h>
22#include <linux/kernel.h> 23#include <linux/kernel.h>
23#include <linux/string.h> 24#include <linux/string.h>
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index d8b10e054627..4714ff162bbd 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/slab.h>
17#include <linux/types.h> 18#include <linux/types.h>
18#include <linux/kernel.h> 19#include <linux/kernel.h>
19#include <linux/errno.h> 20#include <linux/errno.h>
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 93285cecb246..81672e0c1b25 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/slab.h>
15#include <linux/types.h> 16#include <linux/types.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/string.h> 18#include <linux/string.h>
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index cb21380c0605..c5a9ac566007 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -20,6 +20,7 @@
20#include <linux/ipv6.h> 20#include <linux/ipv6.h>
21#include <linux/skbuff.h> 21#include <linux/skbuff.h>
22#include <linux/jhash.h> 22#include <linux/jhash.h>
23#include <linux/slab.h>
23#include <net/ip.h> 24#include <net/ip.h>
24#include <net/netlink.h> 25#include <net/netlink.h>
25#include <net/pkt_sched.h> 26#include <net/pkt_sched.h>
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index db69637069c4..3415b6ce1c0a 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -11,6 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/slab.h>
14#include <linux/string.h> 15#include <linux/string.h>
15#include <linux/errno.h> 16#include <linux/errno.h>
16#include <linux/if_arp.h> 17#include <linux/if_arp.h>
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 56935bbc1496..86366390038a 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -34,6 +34,7 @@
34 * be incorporated into the next SCTP release. 34 * be incorporated into the next SCTP release.
35 */ 35 */
36 36
37#include <linux/slab.h>
37#include <linux/types.h> 38#include <linux/types.h>
38#include <linux/crypto.h> 39#include <linux/crypto.h>
39#include <linux/scatterlist.h> 40#include <linux/scatterlist.h>
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index bef133731683..faf71d179e46 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -43,6 +43,7 @@
43 */ 43 */
44 44
45#include <linux/types.h> 45#include <linux/types.h>
46#include <linux/slab.h>
46#include <linux/in.h> 47#include <linux/in.h>
47#include <net/sock.h> 48#include <net/sock.h>
48#include <net/ipv6.h> 49#include <net/ipv6.h>
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 8e4320040f05..3eab6db59a37 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -42,6 +42,7 @@
42#include <linux/net.h> 42#include <linux/net.h>
43#include <linux/inet.h> 43#include <linux/inet.h>
44#include <linux/skbuff.h> 44#include <linux/skbuff.h>
45#include <linux/slab.h>
45#include <net/sock.h> 46#include <net/sock.h>
46#include <net/sctp/sctp.h> 47#include <net/sctp/sctp.h>
47#include <net/sctp/sm.h> 48#include <net/sctp/sm.h>
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 3d74b264ea22..2a570184e5a9 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -53,6 +53,7 @@
53#include <linux/socket.h> 53#include <linux/socket.h>
54#include <linux/ip.h> 54#include <linux/ip.h>
55#include <linux/time.h> /* For struct timeval */ 55#include <linux/time.h> /* For struct timeval */
56#include <linux/slab.h>
56#include <net/ip.h> 57#include <net/ip.h>
57#include <net/icmp.h> 58#include <net/icmp.h>
58#include <net/snmp.h> 59#include <net/snmp.h>
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index bbf5dd2a97c4..ccb6dc48d15b 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -46,6 +46,7 @@
46#include <net/sctp/sctp.h> 46#include <net/sctp/sctp.h>
47#include <net/sctp/sm.h> 47#include <net/sctp/sm.h>
48#include <linux/interrupt.h> 48#include <linux/interrupt.h>
49#include <linux/slab.h>
49 50
50/* Initialize an SCTP inqueue. */ 51/* Initialize an SCTP inqueue. */
51void sctp_inq_init(struct sctp_inq *queue) 52void sctp_inq_init(struct sctp_inq *queue)
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 1d7ac70ba39f..732689140fb8 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -58,6 +58,7 @@
58#include <linux/netdevice.h> 58#include <linux/netdevice.h>
59#include <linux/init.h> 59#include <linux/init.h>
60#include <linux/ipsec.h> 60#include <linux/ipsec.h>
61#include <linux/slab.h>
61 62
62#include <linux/ipv6.h> 63#include <linux/ipv6.h>
63#include <linux/icmpv6.h> 64#include <linux/icmpv6.h>
@@ -231,7 +232,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
231 if (!(transport->param_flags & SPP_PMTUD_ENABLE)) 232 if (!(transport->param_flags & SPP_PMTUD_ENABLE))
232 skb->local_df = 1; 233 skb->local_df = 1;
233 234
234 return ip6_xmit(sk, skb, &fl, np->opt, 0); 235 return ip6_xmit(sk, skb, &fl, np->opt);
235} 236}
236 237
237/* Returns the dst cache entry for the given source and destination ip 238/* Returns the dst cache entry for the given source and destination ip
@@ -276,20 +277,7 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc,
276static inline int sctp_v6_addr_match_len(union sctp_addr *s1, 277static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
277 union sctp_addr *s2) 278 union sctp_addr *s2)
278{ 279{
279 struct in6_addr *a1 = &s1->v6.sin6_addr; 280 return ipv6_addr_diff(&s1->v6.sin6_addr, &s2->v6.sin6_addr);
280 struct in6_addr *a2 = &s2->v6.sin6_addr;
281 int i, j;
282
283 for (i = 0; i < 4 ; i++) {
284 __be32 a1xora2;
285
286 a1xora2 = a1->s6_addr32[i] ^ a2->s6_addr32[i];
287
288 if ((j = fls(ntohl(a1xora2))))
289 return (i * 32 + 32 - j);
290 }
291
292 return (i*32);
293} 281}
294 282
295/* Fills in the source address(saddr) based on the destination address(daddr) 283/* Fills in the source address(saddr) based on the destination address(daddr)
@@ -371,13 +359,13 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
371 } 359 }
372 360
373 read_lock_bh(&in6_dev->lock); 361 read_lock_bh(&in6_dev->lock);
374 for (ifp = in6_dev->addr_list; ifp; ifp = ifp->if_next) { 362 list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
375 /* Add the address to the local list. */ 363 /* Add the address to the local list. */
376 addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC); 364 addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC);
377 if (addr) { 365 if (addr) {
378 addr->a.v6.sin6_family = AF_INET6; 366 addr->a.v6.sin6_family = AF_INET6;
379 addr->a.v6.sin6_port = 0; 367 addr->a.v6.sin6_port = 0;
380 addr->a.v6.sin6_addr = ifp->addr; 368 ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifp->addr);
381 addr->a.v6.sin6_scope_id = dev->ifindex; 369 addr->a.v6.sin6_scope_id = dev->ifindex;
382 addr->valid = 1; 370 addr->valid = 1;
383 INIT_LIST_HEAD(&addr->list); 371 INIT_LIST_HEAD(&addr->list);
@@ -418,7 +406,7 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
418{ 406{
419 addr->v6.sin6_family = AF_INET6; 407 addr->v6.sin6_family = AF_INET6;
420 addr->v6.sin6_port = 0; 408 addr->v6.sin6_port = 0;
421 addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr; 409 ipv6_addr_copy(&addr->v6.sin6_addr, &inet6_sk(sk)->rcv_saddr);
422} 410}
423 411
424/* Initialize sk->sk_rcv_saddr from sctp_addr. */ 412/* Initialize sk->sk_rcv_saddr from sctp_addr. */
@@ -431,7 +419,7 @@ static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
431 inet6_sk(sk)->rcv_saddr.s6_addr32[3] = 419 inet6_sk(sk)->rcv_saddr.s6_addr32[3] =
432 addr->v4.sin_addr.s_addr; 420 addr->v4.sin_addr.s_addr;
433 } else { 421 } else {
434 inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr; 422 ipv6_addr_copy(&inet6_sk(sk)->rcv_saddr, &addr->v6.sin6_addr);
435 } 423 }
436} 424}
437 425
@@ -444,7 +432,7 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
444 inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff); 432 inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff);
445 inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr; 433 inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
446 } else { 434 } else {
447 inet6_sk(sk)->daddr = addr->v6.sin6_addr; 435 ipv6_addr_copy(&inet6_sk(sk)->daddr, &addr->v6.sin6_addr);
448 } 436 }
449} 437}
450 438
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 7c5589363433..fad261d41ec2 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -48,6 +48,7 @@
48#include <linux/ip.h> 48#include <linux/ip.h>
49#include <linux/ipv6.h> 49#include <linux/ipv6.h>
50#include <linux/init.h> 50#include <linux/init.h>
51#include <linux/slab.h>
51#include <net/inet_ecn.h> 52#include <net/inet_ecn.h>
52#include <net/ip.h> 53#include <net/ip.h>
53#include <net/icmp.h> 54#include <net/icmp.h>
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 229690f02a1d..abfc0b8dee74 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -50,6 +50,7 @@
50#include <linux/list.h> /* For struct list_head */ 50#include <linux/list.h> /* For struct list_head */
51#include <linux/socket.h> 51#include <linux/socket.h>
52#include <linux/ip.h> 52#include <linux/ip.h>
53#include <linux/slab.h>
53#include <net/sock.h> /* For skb_set_owner_w */ 54#include <net/sock.h> /* For skb_set_owner_w */
54 55
55#include <net/sctp/sctp.h> 56#include <net/sctp/sctp.h>
diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c
index 8cb4f060bce6..534c7eae9d15 100644
--- a/net/sctp/primitive.c
+++ b/net/sctp/primitive.c
@@ -50,6 +50,7 @@
50#include <linux/socket.h> 50#include <linux/socket.h>
51#include <linux/ip.h> 51#include <linux/ip.h>
52#include <linux/time.h> /* For struct timeval */ 52#include <linux/time.h> /* For struct timeval */
53#include <linux/gfp.h>
53#include <net/sock.h> 54#include <net/sock.h>
54#include <net/sctp/sctp.h> 55#include <net/sctp/sctp.h>
55#include <net/sctp/sm.h> 56#include <net/sctp/sm.h>
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index e771690f6d5d..704298f4b284 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -54,6 +54,7 @@
54#include <linux/bootmem.h> 54#include <linux/bootmem.h>
55#include <linux/highmem.h> 55#include <linux/highmem.h>
56#include <linux/swap.h> 56#include <linux/swap.h>
57#include <linux/slab.h>
57#include <net/net_namespace.h> 58#include <net/net_namespace.h>
58#include <net/protocol.h> 59#include <net/protocol.h>
59#include <net/ip.h> 60#include <net/ip.h>
@@ -853,7 +854,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
853 IP_PMTUDISC_DO : IP_PMTUDISC_DONT; 854 IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
854 855
855 SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); 856 SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
856 return ip_queue_xmit(skb, 0); 857 return ip_queue_xmit(skb);
857} 858}
858 859
859static struct sctp_af sctp_af_inet; 860static struct sctp_af sctp_af_inet;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 9e732916b671..17cb400ecd6a 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -58,6 +58,7 @@
58#include <linux/inet.h> 58#include <linux/inet.h>
59#include <linux/scatterlist.h> 59#include <linux/scatterlist.h>
60#include <linux/crypto.h> 60#include <linux/crypto.h>
61#include <linux/slab.h>
61#include <net/sock.h> 62#include <net/sock.h>
62 63
63#include <linux/skbuff.h> 64#include <linux/skbuff.h>
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 500886bda9b4..4c5bed9af4e3 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -51,6 +51,7 @@
51#include <linux/types.h> 51#include <linux/types.h>
52#include <linux/socket.h> 52#include <linux/socket.h>
53#include <linux/ip.h> 53#include <linux/ip.h>
54#include <linux/gfp.h>
54#include <net/sock.h> 55#include <net/sock.h>
55#include <net/sctp/sctp.h> 56#include <net/sctp/sctp.h>
56#include <net/sctp/sm.h> 57#include <net/sctp/sm.h>
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 47bc20d3a85b..abf601a1b847 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -56,6 +56,7 @@
56#include <linux/ipv6.h> 56#include <linux/ipv6.h>
57#include <linux/net.h> 57#include <linux/net.h>
58#include <linux/inet.h> 58#include <linux/inet.h>
59#include <linux/slab.h>
59#include <net/sock.h> 60#include <net/sock.h>
60#include <net/inet_ecn.h> 61#include <net/inet_ecn.h>
61#include <linux/skbuff.h> 62#include <linux/skbuff.h>
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index dfc5c127efd4..c1941276f6e3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -67,6 +67,7 @@
67#include <linux/poll.h> 67#include <linux/poll.h>
68#include <linux/init.h> 68#include <linux/init.h>
69#include <linux/crypto.h> 69#include <linux/crypto.h>
70#include <linux/slab.h>
70 71
71#include <net/ip.h> 72#include <net/ip.h>
72#include <net/icmp.h> 73#include <net/icmp.h>
@@ -5481,7 +5482,6 @@ pp_found:
5481 */ 5482 */
5482 int reuse = sk->sk_reuse; 5483 int reuse = sk->sk_reuse;
5483 struct sock *sk2; 5484 struct sock *sk2;
5484 struct hlist_node *node;
5485 5485
5486 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n"); 5486 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
5487 if (pp->fastreuse && sk->sk_reuse && 5487 if (pp->fastreuse && sk->sk_reuse &&
diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c
index 737d330e5ffc..442ad4ed6315 100644
--- a/net/sctp/ssnmap.c
+++ b/net/sctp/ssnmap.c
@@ -37,6 +37,7 @@
37 */ 37 */
38 38
39#include <linux/types.h> 39#include <linux/types.h>
40#include <linux/slab.h>
40#include <net/sctp/sctp.h> 41#include <net/sctp/sctp.h>
41#include <net/sctp/sm.h> 42#include <net/sctp/sm.h>
42 43
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index b827d21dbe54..be4d63d5a5cc 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -48,6 +48,7 @@
48 * be incorporated into the next SCTP release. 48 * be incorporated into the next SCTP release.
49 */ 49 */
50 50
51#include <linux/slab.h>
51#include <linux/types.h> 52#include <linux/types.h>
52#include <linux/random.h> 53#include <linux/random.h>
53#include <net/sctp/sctp.h> 54#include <net/sctp/sctp.h>
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index 9bd64565021a..747d5412c463 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -42,6 +42,7 @@
42 * be incorporated into the next SCTP release. 42 * be incorporated into the next SCTP release.
43 */ 43 */
44 44
45#include <linux/slab.h>
45#include <linux/types.h> 46#include <linux/types.h>
46#include <linux/bitmap.h> 47#include <linux/bitmap.h>
47#include <net/sctp/sctp.h> 48#include <net/sctp/sctp.h>
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 8b3560fd876d..aa72e89c3ee1 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -43,6 +43,7 @@
43 * be incorporated into the next SCTP release. 43 * be incorporated into the next SCTP release.
44 */ 44 */
45 45
46#include <linux/slab.h>
46#include <linux/types.h> 47#include <linux/types.h>
47#include <linux/skbuff.h> 48#include <linux/skbuff.h>
48#include <net/sctp/structs.h> 49#include <net/sctp/structs.h>
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 7b23803343cc..3a448536f0b6 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -41,6 +41,7 @@
41 * be incorporated into the next SCTP release. 41 * be incorporated into the next SCTP release.
42 */ 42 */
43 43
44#include <linux/slab.h>
44#include <linux/types.h> 45#include <linux/types.h>
45#include <linux/skbuff.h> 46#include <linux/skbuff.h>
46#include <net/sock.h> 47#include <net/sock.h>
diff --git a/net/socket.c b/net/socket.c
index 769c386bd428..35bc198bbf68 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -87,6 +87,7 @@
87#include <linux/wireless.h> 87#include <linux/wireless.h>
88#include <linux/nsproxy.h> 88#include <linux/nsproxy.h>
89#include <linux/magic.h> 89#include <linux/magic.h>
90#include <linux/slab.h>
90 91
91#include <asm/uaccess.h> 92#include <asm/uaccess.h>
92#include <asm/unistd.h> 93#include <asm/unistd.h>
@@ -619,10 +620,9 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
619 put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, 620 put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
620 sizeof(tv), &tv); 621 sizeof(tv), &tv);
621 } else { 622 } else {
622 struct timespec ts; 623 skb_get_timestampns(skb, &ts[0]);
623 skb_get_timestampns(skb, &ts);
624 put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, 624 put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS,
625 sizeof(ts), &ts); 625 sizeof(ts[0]), &ts[0]);
626 } 626 }
627 } 627 }
628 628
@@ -2135,6 +2135,10 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
2135 break; 2135 break;
2136 ++datagrams; 2136 ++datagrams;
2137 2137
2138 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2139 if (flags & MSG_WAITFORONE)
2140 flags |= MSG_DONTWAIT;
2141
2138 if (timeout) { 2142 if (timeout) {
2139 ktime_get_ts(timeout); 2143 ktime_get_ts(timeout);
2140 *timeout = timespec_sub(end_time, *timeout); 2144 *timeout = timespec_sub(end_time, *timeout);
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index f845d9d72f73..1419d0cdbbac 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -18,6 +18,7 @@
18 18
19#include <net/ipv6.h> 19#include <net/ipv6.h>
20#include <linux/sunrpc/clnt.h> 20#include <linux/sunrpc/clnt.h>
21#include <linux/slab.h>
21 22
22#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 23#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
23 24
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index bf88bf8e9365..8f623b0f03dd 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -5,6 +5,7 @@
5 */ 5 */
6 6
7#include <linux/err.h> 7#include <linux/err.h>
8#include <linux/slab.h>
8#include <linux/types.h> 9#include <linux/types.h>
9#include <linux/module.h> 10#include <linux/module.h>
10#include <linux/sched.h> 11#include <linux/sched.h>
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 0cfccc2a0297..c389ccf6437d 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1280,9 +1280,8 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
1280 rqstp->rq_release_snd_buf = priv_release_snd_buf; 1280 rqstp->rq_release_snd_buf = priv_release_snd_buf;
1281 return 0; 1281 return 0;
1282out_free: 1282out_free:
1283 for (i--; i >= 0; i--) { 1283 rqstp->rq_enc_pages_num = i;
1284 __free_page(rqstp->rq_enc_pages[i]); 1284 priv_release_snd_buf(rqstp);
1285 }
1286out: 1285out:
1287 return -EAGAIN; 1286 return -EAGAIN;
1288} 1287}
diff --git a/net/sunrpc/auth_gss/gss_generic_token.c b/net/sunrpc/auth_gss/gss_generic_token.c
index c0ba39c4f5f2..310b78e99456 100644
--- a/net/sunrpc/auth_gss/gss_generic_token.c
+++ b/net/sunrpc/auth_gss/gss_generic_token.c
@@ -33,7 +33,6 @@
33 33
34#include <linux/types.h> 34#include <linux/types.h>
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/slab.h>
37#include <linux/string.h> 36#include <linux/string.h>
38#include <linux/sunrpc/sched.h> 37#include <linux/sunrpc/sched.h>
39#include <linux/sunrpc/gss_asn1.h> 38#include <linux/sunrpc/gss_asn1.h>
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index c93fca204558..e9b636176687 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -37,7 +37,6 @@
37#include <linux/err.h> 37#include <linux/err.h>
38#include <linux/types.h> 38#include <linux/types.h>
39#include <linux/mm.h> 39#include <linux/mm.h>
40#include <linux/slab.h>
41#include <linux/scatterlist.h> 40#include <linux/scatterlist.h>
42#include <linux/crypto.h> 41#include <linux/crypto.h>
43#include <linux/highmem.h> 42#include <linux/highmem.h>
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index b8f42ef7178e..88fe6e75ed7e 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -59,7 +59,6 @@
59 */ 59 */
60 60
61#include <linux/types.h> 61#include <linux/types.h>
62#include <linux/slab.h>
63#include <linux/jiffies.h> 62#include <linux/jiffies.h>
64#include <linux/sunrpc/gss_krb5.h> 63#include <linux/sunrpc/gss_krb5.h>
65#include <linux/random.h> 64#include <linux/random.h>
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index 17562b4c35f6..6331cd6866ec 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -32,7 +32,6 @@
32 */ 32 */
33 33
34#include <linux/types.h> 34#include <linux/types.h>
35#include <linux/slab.h>
36#include <linux/sunrpc/gss_krb5.h> 35#include <linux/sunrpc/gss_krb5.h>
37#include <linux/crypto.h> 36#include <linux/crypto.h>
38 37
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index 066ec73c84d6..ce6c247edad0 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -58,7 +58,6 @@
58 */ 58 */
59 59
60#include <linux/types.h> 60#include <linux/types.h>
61#include <linux/slab.h>
62#include <linux/jiffies.h> 61#include <linux/jiffies.h>
63#include <linux/sunrpc/gss_krb5.h> 62#include <linux/sunrpc/gss_krb5.h>
64#include <linux/crypto.h> 63#include <linux/crypto.h>
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index ae8e69b59c4c..a6e905637e03 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -1,5 +1,4 @@
1#include <linux/types.h> 1#include <linux/types.h>
2#include <linux/slab.h>
3#include <linux/jiffies.h> 2#include <linux/jiffies.h>
4#include <linux/sunrpc/gss_krb5.h> 3#include <linux/sunrpc/gss_krb5.h>
5#include <linux/random.h> 4#include <linux/random.h>
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c
index c832712f8d55..5a3a65a0e2b4 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_seal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c
@@ -34,7 +34,6 @@
34 */ 34 */
35 35
36#include <linux/types.h> 36#include <linux/types.h>
37#include <linux/slab.h>
38#include <linux/jiffies.h> 37#include <linux/jiffies.h>
39#include <linux/sunrpc/gss_spkm3.h> 38#include <linux/sunrpc/gss_spkm3.h>
40#include <linux/random.h> 39#include <linux/random.h>
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
index 3308157436d2..a99825d7caa0 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_token.c
@@ -223,7 +223,7 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck
223 223
224 /* only support SPKM_MIC_TOK */ 224 /* only support SPKM_MIC_TOK */
225 if((ptr[6] != 0x01) || (ptr[7] != 0x01)) { 225 if((ptr[6] != 0x01) || (ptr[7] != 0x01)) {
226 dprintk("RPC: ERROR unsupported SPKM3 token \n"); 226 dprintk("RPC: ERROR unsupported SPKM3 token\n");
227 goto out; 227 goto out;
228 } 228 }
229 229
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index e34bc531fcb9..b81e790ef9f4 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -37,6 +37,7 @@
37 * 37 *
38 */ 38 */
39 39
40#include <linux/slab.h>
40#include <linux/types.h> 41#include <linux/types.h>
41#include <linux/module.h> 42#include <linux/module.h>
42#include <linux/pagemap.h> 43#include <linux/pagemap.h>
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 46b2647c5bd2..aac2f8b4ee21 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -6,6 +6,7 @@
6 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> 6 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
7 */ 7 */
8 8
9#include <linux/slab.h>
9#include <linux/types.h> 10#include <linux/types.h>
10#include <linux/sched.h> 11#include <linux/sched.h>
11#include <linux/module.h> 12#include <linux/module.h>
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 553621fb2c41..cf06af3b63c6 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -22,6 +22,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22******************************************************************************/ 22******************************************************************************/
23 23
24#include <linux/tcp.h> 24#include <linux/tcp.h>
25#include <linux/slab.h>
25#include <linux/sunrpc/xprt.h> 26#include <linux/sunrpc/xprt.h>
26 27
27#ifdef RPC_DEBUG 28#ifdef RPC_DEBUG
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index 13f214f53120..7dcfe0cc3500 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -37,21 +37,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 37
38#define RPCDBG_FACILITY RPCDBG_SVCDSP 38#define RPCDBG_FACILITY RPCDBG_SVCDSP
39 39
40void bc_release_request(struct rpc_task *task)
41{
42 struct rpc_rqst *req = task->tk_rqstp;
43
44 dprintk("RPC: bc_release_request: task= %p\n", task);
45
46 /*
47 * Release this request only if it's a backchannel
48 * preallocated request
49 */
50 if (!bc_prealloc(req))
51 return;
52 xprt_free_bc_request(req);
53}
54
55/* Empty callback ops */ 40/* Empty callback ops */
56static const struct rpc_call_ops nfs41_callback_ops = { 41static const struct rpc_call_ops nfs41_callback_ops = {
57}; 42};
@@ -75,7 +60,7 @@ int bc_send(struct rpc_rqst *req)
75 rpc_put_task(task); 60 rpc_put_task(task);
76 } 61 }
77 return ret; 62 return ret;
78 dprintk("RPC: bc_send ret= %d \n", ret); 63 dprintk("RPC: bc_send ret= %d\n", ret);
79} 64}
80 65
81#endif /* CONFIG_NFS_V4_1 */ 66#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 154034b675bd..19c9983d5360 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -659,6 +659,7 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
659 task = rpc_new_task(&task_setup_data); 659 task = rpc_new_task(&task_setup_data);
660 if (!task) { 660 if (!task) {
661 xprt_free_bc_request(req); 661 xprt_free_bc_request(req);
662 task = ERR_PTR(-ENOMEM);
662 goto out; 663 goto out;
663 } 664 }
664 task->tk_rqstp = req; 665 task->tk_rqstp = req;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 8d63f8fd29b7..20e30c6f8355 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -587,6 +587,8 @@ static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
587 struct dentry *dentry; 587 struct dentry *dentry;
588 588
589 dentry = __rpc_lookup_create(parent, name); 589 dentry = __rpc_lookup_create(parent, name);
590 if (IS_ERR(dentry))
591 return dentry;
590 if (dentry->d_inode == NULL) 592 if (dentry->d_inode == NULL)
591 return dentry; 593 return dentry;
592 dput(dentry); 594 dput(dentry);
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 3e3772d8eb92..121105355f60 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -21,6 +21,7 @@
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/slab.h>
24#include <net/ipv6.h> 25#include <net/ipv6.h>
25 26
26#include <linux/sunrpc/clnt.h> 27#include <linux/sunrpc/clnt.h>
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index a661a3acb37e..10b4319ebbca 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/compiler.h> 9#include <linux/compiler.h>
10#include <linux/netdevice.h> 10#include <linux/netdevice.h>
11#include <linux/gfp.h>
11#include <linux/skbuff.h> 12#include <linux/skbuff.h>
12#include <linux/types.h> 13#include <linux/types.h>
13#include <linux/pagemap.h> 14#include <linux/pagemap.h>
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 1b4e6791ecf3..5785d2037f45 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/slab.h>
16 17
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/kernel.h> 19#include <linux/kernel.h>
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 8420a4205b76..d9017d64597e 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -19,6 +19,7 @@
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/kthread.h> 21#include <linux/kthread.h>
22#include <linux/slab.h>
22 23
23#include <linux/sunrpc/types.h> 24#include <linux/sunrpc/types.h>
24#include <linux/sunrpc/xdr.h> 25#include <linux/sunrpc/xdr.h>
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 8f0f1fb3dc52..061b2e0f9118 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -9,6 +9,7 @@
9#include <linux/errno.h> 9#include <linux/errno.h>
10#include <linux/freezer.h> 10#include <linux/freezer.h>
11#include <linux/kthread.h> 11#include <linux/kthread.h>
12#include <linux/slab.h>
12#include <net/sock.h> 13#include <net/sock.h>
13#include <linux/sunrpc/stats.h> 14#include <linux/sunrpc/stats.h>
14#include <linux/sunrpc/svc_xprt.h> 15#include <linux/sunrpc/svc_xprt.h>
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index afdcb0459a83..207311610988 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -10,6 +10,7 @@
10#include <linux/seq_file.h> 10#include <linux/seq_file.h>
11#include <linux/hash.h> 11#include <linux/hash.h>
12#include <linux/string.h> 12#include <linux/string.h>
13#include <linux/slab.h>
13#include <net/sock.h> 14#include <net/sock.h>
14#include <net/ipv6.h> 15#include <net/ipv6.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 8bd690c48b69..2763fde88499 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/slab.h>
10#include <linux/types.h> 11#include <linux/types.h>
11#include <linux/string.h> 12#include <linux/string.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 469de292c23c..42f09ade0044 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -46,6 +46,7 @@
46 46
47#include <linux/sunrpc/clnt.h> 47#include <linux/sunrpc/clnt.h>
48#include <linux/sunrpc/metrics.h> 48#include <linux/sunrpc/metrics.h>
49#include <linux/sunrpc/bc_xprt.h>
49 50
50#include "sunrpc.h" 51#include "sunrpc.h"
51 52
@@ -1032,21 +1033,16 @@ void xprt_release(struct rpc_task *task)
1032 if (req->rq_release_snd_buf) 1033 if (req->rq_release_snd_buf)
1033 req->rq_release_snd_buf(req); 1034 req->rq_release_snd_buf(req);
1034 1035
1035 /*
1036 * Early exit if this is a backchannel preallocated request.
1037 * There is no need to have it added to the RPC slot list.
1038 */
1039 if (is_bc_request)
1040 return;
1041
1042 memset(req, 0, sizeof(*req)); /* mark unused */
1043
1044 dprintk("RPC: %5u release request %p\n", task->tk_pid, req); 1036 dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
1037 if (likely(!is_bc_request)) {
1038 memset(req, 0, sizeof(*req)); /* mark unused */
1045 1039
1046 spin_lock(&xprt->reserve_lock); 1040 spin_lock(&xprt->reserve_lock);
1047 list_add(&req->rq_list, &xprt->free); 1041 list_add(&req->rq_list, &xprt->free);
1048 rpc_wake_up_next(&xprt->backlog); 1042 rpc_wake_up_next(&xprt->backlog);
1049 spin_unlock(&xprt->reserve_lock); 1043 spin_unlock(&xprt->reserve_lock);
1044 } else
1045 xprt_free_bc_request(req);
1050} 1046}
1051 1047
1052/** 1048/**
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
index 5b8a8ff93a25..d718b8fa9525 100644
--- a/net/sunrpc/xprtrdma/svc_rdma.c
+++ b/net/sunrpc/xprtrdma/svc_rdma.c
@@ -40,6 +40,7 @@
40 */ 40 */
41#include <linux/module.h> 41#include <linux/module.h>
42#include <linux/init.h> 42#include <linux/init.h>
43#include <linux/slab.h>
43#include <linux/fs.h> 44#include <linux/fs.h>
44#include <linux/sysctl.h> 45#include <linux/sysctl.h>
45#include <linux/sunrpc/clnt.h> 46#include <linux/sunrpc/clnt.h>
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 3fa5751af0ec..fd90eb89842b 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -43,6 +43,7 @@
43#include <linux/sunrpc/debug.h> 43#include <linux/sunrpc/debug.h>
44#include <linux/sunrpc/rpc_rdma.h> 44#include <linux/sunrpc/rpc_rdma.h>
45#include <linux/sched.h> 45#include <linux/sched.h>
46#include <linux/slab.h>
46#include <linux/spinlock.h> 47#include <linux/spinlock.h>
47#include <rdma/ib_verbs.h> 48#include <rdma/ib_verbs.h>
48#include <rdma/rdma_cm.h> 49#include <rdma/rdma_cm.h>
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index f96c2fe6137b..187257b1d880 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -49,6 +49,7 @@
49 49
50#include <linux/module.h> 50#include <linux/module.h>
51#include <linux/init.h> 51#include <linux/init.h>
52#include <linux/slab.h>
52#include <linux/seq_file.h> 53#include <linux/seq_file.h>
53 54
54#include "xprt_rdma.h" 55#include "xprt_rdma.h"
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 2209aa87d899..27015c6d8eb5 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -48,6 +48,7 @@
48 */ 48 */
49 49
50#include <linux/pci.h> /* for Tavor hack below */ 50#include <linux/pci.h> /* for Tavor hack below */
51#include <linux/slab.h>
51 52
52#include "xprt_rdma.h" 53#include "xprt_rdma.h"
53 54
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 75ab08eac66b..9847c30b5001 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -548,8 +548,6 @@ static int xs_udp_send_request(struct rpc_task *task)
548 /* Still some bytes left; set up for a retry later. */ 548 /* Still some bytes left; set up for a retry later. */
549 status = -EAGAIN; 549 status = -EAGAIN;
550 } 550 }
551 if (!transport->sock)
552 goto out;
553 551
554 switch (status) { 552 switch (status) {
555 case -ENOTSOCK: 553 case -ENOTSOCK:
@@ -569,7 +567,7 @@ static int xs_udp_send_request(struct rpc_task *task)
569 * prompts ECONNREFUSED. */ 567 * prompts ECONNREFUSED. */
570 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 568 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
571 } 569 }
572out: 570
573 return status; 571 return status;
574} 572}
575 573
@@ -651,8 +649,6 @@ static int xs_tcp_send_request(struct rpc_task *task)
651 status = -EAGAIN; 649 status = -EAGAIN;
652 break; 650 break;
653 } 651 }
654 if (!transport->sock)
655 goto out;
656 652
657 switch (status) { 653 switch (status) {
658 case -ENOTSOCK: 654 case -ENOTSOCK:
@@ -672,7 +668,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
672 case -ENOTCONN: 668 case -ENOTCONN:
673 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 669 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
674 } 670 }
675out: 671
676 return status; 672 return status;
677} 673}
678 674
@@ -2255,9 +2251,6 @@ static struct rpc_xprt_ops xs_tcp_ops = {
2255 .buf_free = rpc_free, 2251 .buf_free = rpc_free,
2256 .send_request = xs_tcp_send_request, 2252 .send_request = xs_tcp_send_request,
2257 .set_retrans_timeout = xprt_set_retrans_timeout_def, 2253 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2258#if defined(CONFIG_NFS_V4_1)
2259 .release_request = bc_release_request,
2260#endif /* CONFIG_NFS_V4_1 */
2261 .close = xs_tcp_close, 2254 .close = xs_tcp_close,
2262 .destroy = xs_destroy, 2255 .destroy = xs_destroy,
2263 .print_stats = xs_tcp_print_stats, 2256 .print_stats = xs_tcp_print_stats,
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 52c571fedbe0..4e84c8431f32 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -49,7 +49,7 @@
49#include "config.h" 49#include "config.h"
50 50
51 51
52#define TIPC_MOD_VER "1.6.4" 52#define TIPC_MOD_VER "2.0.0"
53 53
54#ifndef CONFIG_TIPC_ZONES 54#ifndef CONFIG_TIPC_ZONES
55#define CONFIG_TIPC_ZONES 3 55#define CONFIG_TIPC_ZONES 3
diff --git a/net/tipc/core.h b/net/tipc/core.h
index a881f92a8537..c58a1d16563a 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -56,6 +56,7 @@
56#include <linux/netdevice.h> 56#include <linux/netdevice.h>
57#include <linux/in.h> 57#include <linux/in.h>
58#include <linux/list.h> 58#include <linux/list.h>
59#include <linux/slab.h>
59#include <linux/vmalloc.h> 60#include <linux/vmalloc.h>
60 61
61/* 62/*
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 524ba5696d4d..6230d16020c4 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -38,6 +38,7 @@
38#include <net/tipc/tipc_bearer.h> 38#include <net/tipc/tipc_bearer.h>
39#include <net/tipc/tipc_msg.h> 39#include <net/tipc/tipc_msg.h>
40#include <linux/netdevice.h> 40#include <linux/netdevice.h>
41#include <linux/slab.h>
41#include <net/net_namespace.h> 42#include <net/net_namespace.h>
42 43
43#define MAX_ETH_BEARERS 2 44#define MAX_ETH_BEARERS 2
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 49f2be8622a9..c76e82e5f982 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -877,7 +877,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
877 case TIMEOUT_EVT: 877 case TIMEOUT_EVT:
878 dbg_link("TIM "); 878 dbg_link("TIM ");
879 if (l_ptr->next_in_no != l_ptr->checkpoint) { 879 if (l_ptr->next_in_no != l_ptr->checkpoint) {
880 dbg_link("-> WW \n"); 880 dbg_link("-> WW\n");
881 l_ptr->state = WORKING_WORKING; 881 l_ptr->state = WORKING_WORKING;
882 l_ptr->fsm_msg_cnt = 0; 882 l_ptr->fsm_msg_cnt = 0;
883 l_ptr->checkpoint = l_ptr->next_in_no; 883 l_ptr->checkpoint = l_ptr->next_in_no;
@@ -934,7 +934,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
934 link_set_timer(l_ptr, cont_intv); 934 link_set_timer(l_ptr, cont_intv);
935 break; 935 break;
936 case RESET_MSG: 936 case RESET_MSG:
937 dbg_link("RES \n"); 937 dbg_link("RES\n");
938 dbg_link(" -> RR\n"); 938 dbg_link(" -> RR\n");
939 l_ptr->state = RESET_RESET; 939 l_ptr->state = RESET_RESET;
940 l_ptr->fsm_msg_cnt = 0; 940 l_ptr->fsm_msg_cnt = 0;
@@ -947,7 +947,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
947 l_ptr->started = 1; 947 l_ptr->started = 1;
948 /* fall through */ 948 /* fall through */
949 case TIMEOUT_EVT: 949 case TIMEOUT_EVT:
950 dbg_link("TIM \n"); 950 dbg_link("TIM\n");
951 tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0); 951 tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
952 l_ptr->fsm_msg_cnt++; 952 l_ptr->fsm_msg_cnt++;
953 link_set_timer(l_ptr, cont_intv); 953 link_set_timer(l_ptr, cont_intv);
@@ -3295,7 +3295,7 @@ static void link_dump_rec_queue(struct link *l_ptr)
3295 info("buffer %x invalid\n", crs); 3295 info("buffer %x invalid\n", crs);
3296 return; 3296 return;
3297 } 3297 }
3298 msg_dbg(buf_msg(crs), "In rec queue: \n"); 3298 msg_dbg(buf_msg(crs), "In rec queue:\n");
3299 crs = crs->next; 3299 crs = crs->next;
3300 } 3300 }
3301} 3301}
diff --git a/net/tipc/net.c b/net/tipc/net.c
index f25b1cdb64eb..d7cd1e064a80 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -116,7 +116,7 @@
116*/ 116*/
117 117
118DEFINE_RWLOCK(tipc_net_lock); 118DEFINE_RWLOCK(tipc_net_lock);
119struct _zone *tipc_zones[256] = { NULL, }; 119static struct _zone *tipc_zones[256] = { NULL, };
120struct network tipc_net = { tipc_zones }; 120struct network tipc_net = { tipc_zones };
121 121
122struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref) 122struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
@@ -291,6 +291,6 @@ void tipc_net_stop(void)
291 tipc_bclink_stop(); 291 tipc_bclink_stop();
292 net_stop(); 292 net_stop();
293 write_unlock_bh(&tipc_net_lock); 293 write_unlock_bh(&tipc_net_lock);
294 info("Left network mode \n"); 294 info("Left network mode\n");
295} 295}
296 296
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 2c24e7d6d950..17cc394f424f 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -278,7 +278,7 @@ struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
278 n_ptr->link_cnt++; 278 n_ptr->link_cnt++;
279 return n_ptr; 279 return n_ptr;
280 } 280 }
281 err("Attempt to establish second link on <%s> to %s \n", 281 err("Attempt to establish second link on <%s> to %s\n",
282 l_ptr->b_ptr->publ.name, 282 l_ptr->b_ptr->publ.name,
283 addr_string_fill(addr_string, l_ptr->addr)); 283 addr_string_fill(addr_string, l_ptr->addr));
284 } 284 }
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 4b235fc1c70f..cfb20b80b3a1 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -40,9 +40,9 @@
40#include <linux/socket.h> 40#include <linux/socket.h>
41#include <linux/errno.h> 41#include <linux/errno.h>
42#include <linux/mm.h> 42#include <linux/mm.h>
43#include <linux/slab.h>
44#include <linux/poll.h> 43#include <linux/poll.h>
45#include <linux/fcntl.h> 44#include <linux/fcntl.h>
45#include <linux/gfp.h>
46#include <asm/string.h> 46#include <asm/string.h>
47#include <asm/atomic.h> 47#include <asm/atomic.h>
48#include <net/sock.h> 48#include <net/sock.h>
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index ff123e56114a..ab6eab4c45e2 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -274,7 +274,7 @@ static void subscr_cancel(struct tipc_subscr *s,
274{ 274{
275 struct subscription *sub; 275 struct subscription *sub;
276 struct subscription *sub_temp; 276 struct subscription *sub_temp;
277 __u32 type, lower, upper; 277 __u32 type, lower, upper, timeout, filter;
278 int found = 0; 278 int found = 0;
279 279
280 /* Find first matching subscription, exit if not found */ 280 /* Find first matching subscription, exit if not found */
@@ -282,12 +282,18 @@ static void subscr_cancel(struct tipc_subscr *s,
282 type = ntohl(s->seq.type); 282 type = ntohl(s->seq.type);
283 lower = ntohl(s->seq.lower); 283 lower = ntohl(s->seq.lower);
284 upper = ntohl(s->seq.upper); 284 upper = ntohl(s->seq.upper);
285 timeout = ntohl(s->timeout);
286 filter = ntohl(s->filter) & ~TIPC_SUB_CANCEL;
285 287
286 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, 288 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
287 subscription_list) { 289 subscription_list) {
288 if ((type == sub->seq.type) && 290 if ((type == sub->seq.type) &&
289 (lower == sub->seq.lower) && 291 (lower == sub->seq.lower) &&
290 (upper == sub->seq.upper)) { 292 (upper == sub->seq.upper) &&
293 (timeout == sub->timeout) &&
294 (filter == sub->filter) &&
295 !memcmp(s->usr_handle,sub->evt.s.usr_handle,
296 sizeof(s->usr_handle)) ){
291 found = 1; 297 found = 1;
292 break; 298 break;
293 } 299 }
@@ -304,7 +310,7 @@ static void subscr_cancel(struct tipc_subscr *s,
304 k_term_timer(&sub->timer); 310 k_term_timer(&sub->timer);
305 spin_lock_bh(subscriber->lock); 311 spin_lock_bh(subscriber->lock);
306 } 312 }
307 dbg("Cancel: removing sub %u,%u,%u from subscriber %x list\n", 313 dbg("Cancel: removing sub %u,%u,%u from subscriber %p list\n",
308 sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber); 314 sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
309 subscr_del(sub); 315 subscr_del(sub);
310} 316}
@@ -352,8 +358,7 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
352 sub->seq.upper = ntohl(s->seq.upper); 358 sub->seq.upper = ntohl(s->seq.upper);
353 sub->timeout = ntohl(s->timeout); 359 sub->timeout = ntohl(s->timeout);
354 sub->filter = ntohl(s->filter); 360 sub->filter = ntohl(s->filter);
355 if ((!(sub->filter & TIPC_SUB_PORTS) == 361 if ((sub->filter && (sub->filter != TIPC_SUB_PORTS)) ||
356 !(sub->filter & TIPC_SUB_SERVICE)) ||
357 (sub->seq.lower > sub->seq.upper)) { 362 (sub->seq.lower > sub->seq.upper)) {
358 warn("Subscription rejected, illegal request\n"); 363 warn("Subscription rejected, illegal request\n");
359 kfree(sub); 364 kfree(sub);
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 19c17e4a0c8b..14c22c3768da 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -74,7 +74,6 @@
74#include <linux/un.h> 74#include <linux/un.h>
75#include <linux/net.h> 75#include <linux/net.h>
76#include <linux/fs.h> 76#include <linux/fs.h>
77#include <linux/slab.h>
78#include <linux/skbuff.h> 77#include <linux/skbuff.h>
79#include <linux/netdevice.h> 78#include <linux/netdevice.h>
80#include <linux/file.h> 79#include <linux/file.h>
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index d095c7be10d0..397cffebb3b6 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/slab.h>
13#include <linux/sysctl.h> 14#include <linux/sysctl.h>
14 15
15#include <net/af_unix.h> 16#include <net/af_unix.h>
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index 7718657e93dc..d5b7c3779c43 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -72,6 +72,7 @@
72 * wimax_msg_send() 72 * wimax_msg_send()
73 */ 73 */
74#include <linux/device.h> 74#include <linux/device.h>
75#include <linux/slab.h>
75#include <net/genetlink.h> 76#include <net/genetlink.h>
76#include <linux/netdevice.h> 77#include <linux/netdevice.h>
77#include <linux/wimax.h> 78#include <linux/wimax.h>
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
index 4dc82a54ba30..68bedf3e5443 100644
--- a/net/wimax/op-reset.c
+++ b/net/wimax/op-reset.c
@@ -110,7 +110,6 @@ int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info)
110{ 110{
111 int result, ifindex; 111 int result, ifindex;
112 struct wimax_dev *wimax_dev; 112 struct wimax_dev *wimax_dev;
113 struct device *dev;
114 113
115 d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); 114 d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
116 result = -ENODEV; 115 result = -ENODEV;
@@ -123,7 +122,6 @@ int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info)
123 wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); 122 wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
124 if (wimax_dev == NULL) 123 if (wimax_dev == NULL)
125 goto error_no_wimax_dev; 124 goto error_no_wimax_dev;
126 dev = wimax_dev_to_dev(wimax_dev);
127 /* Execute the operation and send the result back to user space */ 125 /* Execute the operation and send the result back to user space */
128 result = wimax_reset(wimax_dev); 126 result = wimax_reset(wimax_dev);
129 dev_put(wimax_dev->net_dev); 127 dev_put(wimax_dev->net_dev);
diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c
index 11ad3356eb56..aff8776e2d41 100644
--- a/net/wimax/op-state-get.c
+++ b/net/wimax/op-state-get.c
@@ -53,7 +53,6 @@ int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info)
53{ 53{
54 int result, ifindex; 54 int result, ifindex;
55 struct wimax_dev *wimax_dev; 55 struct wimax_dev *wimax_dev;
56 struct device *dev;
57 56
58 d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); 57 d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
59 result = -ENODEV; 58 result = -ENODEV;
@@ -66,7 +65,6 @@ int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info)
66 wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); 65 wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
67 if (wimax_dev == NULL) 66 if (wimax_dev == NULL)
68 goto error_no_wimax_dev; 67 goto error_no_wimax_dev;
69 dev = wimax_dev_to_dev(wimax_dev);
70 /* Execute the operation and send the result back to user space */ 68 /* Execute the operation and send the result back to user space */
71 result = wimax_state_get(wimax_dev); 69 result = wimax_state_get(wimax_dev);
72 dev_put(wimax_dev->net_dev); 70 dev_put(wimax_dev->net_dev);
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index 813e1eaea29b..1ed65dbdab03 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -51,6 +51,7 @@
51 * wimax_rfkill_rm() 51 * wimax_rfkill_rm()
52 */ 52 */
53#include <linux/device.h> 53#include <linux/device.h>
54#include <linux/gfp.h>
54#include <net/genetlink.h> 55#include <net/genetlink.h>
55#include <linux/netdevice.h> 56#include <linux/netdevice.h>
56#include <linux/wimax.h> 57#include <linux/wimax.h>
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 7fdb9409ad2a..6ac70c101523 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -8,6 +8,7 @@
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/err.h> 9#include <linux/err.h>
10#include <linux/list.h> 10#include <linux/list.h>
11#include <linux/slab.h>
11#include <linux/nl80211.h> 12#include <linux/nl80211.h>
12#include <linux/debugfs.h> 13#include <linux/debugfs.h>
13#include <linux/notifier.h> 14#include <linux/notifier.h>
diff --git a/net/wireless/core.h b/net/wireless/core.h
index d52da913145a..b2234b436ead 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -293,13 +293,15 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
293 const u8 *bssid, 293 const u8 *bssid,
294 const u8 *ssid, int ssid_len, 294 const u8 *ssid, int ssid_len,
295 const u8 *ie, int ie_len, 295 const u8 *ie, int ie_len,
296 const u8 *key, int key_len, int key_idx); 296 const u8 *key, int key_len, int key_idx,
297 bool local_state_change);
297int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, 298int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
298 struct net_device *dev, struct ieee80211_channel *chan, 299 struct net_device *dev, struct ieee80211_channel *chan,
299 enum nl80211_auth_type auth_type, const u8 *bssid, 300 enum nl80211_auth_type auth_type, const u8 *bssid,
300 const u8 *ssid, int ssid_len, 301 const u8 *ssid, int ssid_len,
301 const u8 *ie, int ie_len, 302 const u8 *ie, int ie_len,
302 const u8 *key, int key_len, int key_idx); 303 const u8 *key, int key_len, int key_idx,
304 bool local_state_change);
303int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, 305int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
304 struct net_device *dev, 306 struct net_device *dev,
305 struct ieee80211_channel *chan, 307 struct ieee80211_channel *chan,
@@ -315,13 +317,16 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
315 struct cfg80211_crypto_settings *crypt); 317 struct cfg80211_crypto_settings *crypt);
316int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, 318int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
317 struct net_device *dev, const u8 *bssid, 319 struct net_device *dev, const u8 *bssid,
318 const u8 *ie, int ie_len, u16 reason); 320 const u8 *ie, int ie_len, u16 reason,
321 bool local_state_change);
319int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, 322int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
320 struct net_device *dev, const u8 *bssid, 323 struct net_device *dev, const u8 *bssid,
321 const u8 *ie, int ie_len, u16 reason); 324 const u8 *ie, int ie_len, u16 reason,
325 bool local_state_change);
322int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, 326int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
323 struct net_device *dev, const u8 *bssid, 327 struct net_device *dev, const u8 *bssid,
324 const u8 *ie, int ie_len, u16 reason); 328 const u8 *ie, int ie_len, u16 reason,
329 bool local_state_change);
325void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, 330void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
326 struct net_device *dev); 331 struct net_device *dev);
327void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, 332void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
index 2e4895615037..a4991a3efec0 100644
--- a/net/wireless/debugfs.c
+++ b/net/wireless/debugfs.c
@@ -9,6 +9,7 @@
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/slab.h>
12#include "core.h" 13#include "core.h"
13#include "debugfs.h" 14#include "debugfs.h"
14 15
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 6ef5a491fb4b..6a5acf750174 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -6,6 +6,7 @@
6 6
7#include <linux/etherdevice.h> 7#include <linux/etherdevice.h>
8#include <linux/if_arp.h> 8#include <linux/if_arp.h>
9#include <linux/slab.h>
9#include <net/cfg80211.h> 10#include <net/cfg80211.h>
10#include "wext-compat.h" 11#include "wext-compat.h"
11#include "nl80211.h" 12#include "nl80211.h"
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 62bc8855e123..48ead6f0426d 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -8,6 +8,7 @@
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/netdevice.h> 9#include <linux/netdevice.h>
10#include <linux/nl80211.h> 10#include <linux/nl80211.h>
11#include <linux/slab.h>
11#include <linux/wireless.h> 12#include <linux/wireless.h>
12#include <net/cfg80211.h> 13#include <net/cfg80211.h>
13#include <net/iw_handler.h> 14#include <net/iw_handler.h>
@@ -377,7 +378,8 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
377 const u8 *bssid, 378 const u8 *bssid,
378 const u8 *ssid, int ssid_len, 379 const u8 *ssid, int ssid_len,
379 const u8 *ie, int ie_len, 380 const u8 *ie, int ie_len,
380 const u8 *key, int key_len, int key_idx) 381 const u8 *key, int key_len, int key_idx,
382 bool local_state_change)
381{ 383{
382 struct wireless_dev *wdev = dev->ieee80211_ptr; 384 struct wireless_dev *wdev = dev->ieee80211_ptr;
383 struct cfg80211_auth_request req; 385 struct cfg80211_auth_request req;
@@ -407,6 +409,7 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
407 409
408 memset(&req, 0, sizeof(req)); 410 memset(&req, 0, sizeof(req));
409 411
412 req.local_state_change = local_state_change;
410 req.ie = ie; 413 req.ie = ie;
411 req.ie_len = ie_len; 414 req.ie_len = ie_len;
412 req.auth_type = auth_type; 415 req.auth_type = auth_type;
@@ -433,12 +436,18 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
433 goto out; 436 goto out;
434 } 437 }
435 438
436 wdev->authtry_bsses[slot] = bss; 439 if (local_state_change)
440 wdev->auth_bsses[slot] = bss;
441 else
442 wdev->authtry_bsses[slot] = bss;
437 cfg80211_hold_bss(bss); 443 cfg80211_hold_bss(bss);
438 444
439 err = rdev->ops->auth(&rdev->wiphy, dev, &req); 445 err = rdev->ops->auth(&rdev->wiphy, dev, &req);
440 if (err) { 446 if (err) {
441 wdev->authtry_bsses[slot] = NULL; 447 if (local_state_change)
448 wdev->auth_bsses[slot] = NULL;
449 else
450 wdev->authtry_bsses[slot] = NULL;
442 cfg80211_unhold_bss(bss); 451 cfg80211_unhold_bss(bss);
443 } 452 }
444 453
@@ -453,14 +462,15 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
453 enum nl80211_auth_type auth_type, const u8 *bssid, 462 enum nl80211_auth_type auth_type, const u8 *bssid,
454 const u8 *ssid, int ssid_len, 463 const u8 *ssid, int ssid_len,
455 const u8 *ie, int ie_len, 464 const u8 *ie, int ie_len,
456 const u8 *key, int key_len, int key_idx) 465 const u8 *key, int key_len, int key_idx,
466 bool local_state_change)
457{ 467{
458 int err; 468 int err;
459 469
460 wdev_lock(dev->ieee80211_ptr); 470 wdev_lock(dev->ieee80211_ptr);
461 err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, 471 err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
462 ssid, ssid_len, ie, ie_len, 472 ssid, ssid_len, ie, ie_len,
463 key, key_len, key_idx); 473 key, key_len, key_idx, local_state_change);
464 wdev_unlock(dev->ieee80211_ptr); 474 wdev_unlock(dev->ieee80211_ptr);
465 475
466 return err; 476 return err;
@@ -554,7 +564,8 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
554 564
555int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, 565int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
556 struct net_device *dev, const u8 *bssid, 566 struct net_device *dev, const u8 *bssid,
557 const u8 *ie, int ie_len, u16 reason) 567 const u8 *ie, int ie_len, u16 reason,
568 bool local_state_change)
558{ 569{
559 struct wireless_dev *wdev = dev->ieee80211_ptr; 570 struct wireless_dev *wdev = dev->ieee80211_ptr;
560 struct cfg80211_deauth_request req; 571 struct cfg80211_deauth_request req;
@@ -564,6 +575,7 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
564 575
565 memset(&req, 0, sizeof(req)); 576 memset(&req, 0, sizeof(req));
566 req.reason_code = reason; 577 req.reason_code = reason;
578 req.local_state_change = local_state_change;
567 req.ie = ie; 579 req.ie = ie;
568 req.ie_len = ie_len; 580 req.ie_len = ie_len;
569 if (wdev->current_bss && 581 if (wdev->current_bss &&
@@ -590,13 +602,15 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
590 602
591int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, 603int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
592 struct net_device *dev, const u8 *bssid, 604 struct net_device *dev, const u8 *bssid,
593 const u8 *ie, int ie_len, u16 reason) 605 const u8 *ie, int ie_len, u16 reason,
606 bool local_state_change)
594{ 607{
595 struct wireless_dev *wdev = dev->ieee80211_ptr; 608 struct wireless_dev *wdev = dev->ieee80211_ptr;
596 int err; 609 int err;
597 610
598 wdev_lock(wdev); 611 wdev_lock(wdev);
599 err = __cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason); 612 err = __cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason,
613 local_state_change);
600 wdev_unlock(wdev); 614 wdev_unlock(wdev);
601 615
602 return err; 616 return err;
@@ -604,7 +618,8 @@ int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
604 618
605static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, 619static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
606 struct net_device *dev, const u8 *bssid, 620 struct net_device *dev, const u8 *bssid,
607 const u8 *ie, int ie_len, u16 reason) 621 const u8 *ie, int ie_len, u16 reason,
622 bool local_state_change)
608{ 623{
609 struct wireless_dev *wdev = dev->ieee80211_ptr; 624 struct wireless_dev *wdev = dev->ieee80211_ptr;
610 struct cfg80211_disassoc_request req; 625 struct cfg80211_disassoc_request req;
@@ -619,6 +634,7 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
619 634
620 memset(&req, 0, sizeof(req)); 635 memset(&req, 0, sizeof(req));
621 req.reason_code = reason; 636 req.reason_code = reason;
637 req.local_state_change = local_state_change;
622 req.ie = ie; 638 req.ie = ie;
623 req.ie_len = ie_len; 639 req.ie_len = ie_len;
624 if (memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) 640 if (memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0)
@@ -631,13 +647,15 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
631 647
632int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, 648int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
633 struct net_device *dev, const u8 *bssid, 649 struct net_device *dev, const u8 *bssid,
634 const u8 *ie, int ie_len, u16 reason) 650 const u8 *ie, int ie_len, u16 reason,
651 bool local_state_change)
635{ 652{
636 struct wireless_dev *wdev = dev->ieee80211_ptr; 653 struct wireless_dev *wdev = dev->ieee80211_ptr;
637 int err; 654 int err;
638 655
639 wdev_lock(wdev); 656 wdev_lock(wdev);
640 err = __cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason); 657 err = __cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason,
658 local_state_change);
641 wdev_unlock(wdev); 659 wdev_unlock(wdev);
642 660
643 return err; 661 return err;
@@ -894,3 +912,16 @@ void cfg80211_action_tx_status(struct net_device *dev, u64 cookie,
894 nl80211_send_action_tx_status(rdev, dev, cookie, buf, len, ack, gfp); 912 nl80211_send_action_tx_status(rdev, dev, cookie, buf, len, ack, gfp);
895} 913}
896EXPORT_SYMBOL(cfg80211_action_tx_status); 914EXPORT_SYMBOL(cfg80211_action_tx_status);
915
916void cfg80211_cqm_rssi_notify(struct net_device *dev,
917 enum nl80211_cqm_rssi_threshold_event rssi_event,
918 gfp_t gfp)
919{
920 struct wireless_dev *wdev = dev->ieee80211_ptr;
921 struct wiphy *wiphy = wdev->wiphy;
922 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
923
924 /* Indicate roaming trigger event to user space */
925 nl80211_send_cqm_rssi_notify(rdev, dev, rssi_event, gfp);
926}
927EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index e447db04cf76..356a84a5daee 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -7,6 +7,7 @@
7#include <linux/if.h> 7#include <linux/if.h>
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/err.h> 9#include <linux/err.h>
10#include <linux/slab.h>
10#include <linux/list.h> 11#include <linux/list.h>
11#include <linux/if_ether.h> 12#include <linux/if_ether.h>
12#include <linux/ieee80211.h> 13#include <linux/ieee80211.h>
@@ -149,6 +150,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
149 .len = IEEE80211_MAX_DATA_LEN }, 150 .len = IEEE80211_MAX_DATA_LEN },
150 [NL80211_ATTR_FRAME_MATCH] = { .type = NLA_BINARY, }, 151 [NL80211_ATTR_FRAME_MATCH] = { .type = NLA_BINARY, },
151 [NL80211_ATTR_PS_STATE] = { .type = NLA_U32 }, 152 [NL80211_ATTR_PS_STATE] = { .type = NLA_U32 },
153 [NL80211_ATTR_CQM] = { .type = NLA_NESTED, },
154 [NL80211_ATTR_LOCAL_STATE_CHANGE] = { .type = NLA_FLAG },
152}; 155};
153 156
154/* policy for the attributes */ 157/* policy for the attributes */
@@ -2095,7 +2098,8 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
2095 goto out_rtnl; 2098 goto out_rtnl;
2096 2099
2097 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && 2100 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
2098 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) { 2101 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
2102 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
2099 err = -EINVAL; 2103 err = -EINVAL;
2100 goto out; 2104 goto out;
2101 } 2105 }
@@ -3391,6 +3395,7 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
3391 int err, ssid_len, ie_len = 0; 3395 int err, ssid_len, ie_len = 0;
3392 enum nl80211_auth_type auth_type; 3396 enum nl80211_auth_type auth_type;
3393 struct key_parse key; 3397 struct key_parse key;
3398 bool local_state_change;
3394 3399
3395 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) 3400 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
3396 return -EINVAL; 3401 return -EINVAL;
@@ -3469,9 +3474,12 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
3469 goto out; 3474 goto out;
3470 } 3475 }
3471 3476
3477 local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
3478
3472 err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, 3479 err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
3473 ssid, ssid_len, ie, ie_len, 3480 ssid, ssid_len, ie, ie_len,
3474 key.p.key, key.p.key_len, key.idx); 3481 key.p.key, key.p.key_len, key.idx,
3482 local_state_change);
3475 3483
3476out: 3484out:
3477 cfg80211_unlock_rdev(rdev); 3485 cfg80211_unlock_rdev(rdev);
@@ -3648,6 +3656,7 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
3648 const u8 *ie = NULL, *bssid; 3656 const u8 *ie = NULL, *bssid;
3649 int err, ie_len = 0; 3657 int err, ie_len = 0;
3650 u16 reason_code; 3658 u16 reason_code;
3659 bool local_state_change;
3651 3660
3652 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) 3661 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
3653 return -EINVAL; 3662 return -EINVAL;
@@ -3693,7 +3702,10 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
3693 ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); 3702 ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
3694 } 3703 }
3695 3704
3696 err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code); 3705 local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
3706
3707 err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code,
3708 local_state_change);
3697 3709
3698out: 3710out:
3699 cfg80211_unlock_rdev(rdev); 3711 cfg80211_unlock_rdev(rdev);
@@ -3710,6 +3722,7 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
3710 const u8 *ie = NULL, *bssid; 3722 const u8 *ie = NULL, *bssid;
3711 int err, ie_len = 0; 3723 int err, ie_len = 0;
3712 u16 reason_code; 3724 u16 reason_code;
3725 bool local_state_change;
3713 3726
3714 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) 3727 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
3715 return -EINVAL; 3728 return -EINVAL;
@@ -3755,7 +3768,10 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
3755 ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); 3768 ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
3756 } 3769 }
3757 3770
3758 err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code); 3771 local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
3772
3773 err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code,
3774 local_state_change);
3759 3775
3760out: 3776out:
3761 cfg80211_unlock_rdev(rdev); 3777 cfg80211_unlock_rdev(rdev);
@@ -4778,6 +4794,84 @@ unlock_rtnl:
4778 return err; 4794 return err;
4779} 4795}
4780 4796
4797static struct nla_policy
4798nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] __read_mostly = {
4799 [NL80211_ATTR_CQM_RSSI_THOLD] = { .type = NLA_U32 },
4800 [NL80211_ATTR_CQM_RSSI_HYST] = { .type = NLA_U32 },
4801 [NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT] = { .type = NLA_U32 },
4802};
4803
4804static int nl80211_set_cqm_rssi(struct genl_info *info,
4805 s32 threshold, u32 hysteresis)
4806{
4807 struct cfg80211_registered_device *rdev;
4808 struct wireless_dev *wdev;
4809 struct net_device *dev;
4810 int err;
4811
4812 if (threshold > 0)
4813 return -EINVAL;
4814
4815 rtnl_lock();
4816
4817 err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
4818 if (err)
4819 goto unlock_rdev;
4820
4821 wdev = dev->ieee80211_ptr;
4822
4823 if (!rdev->ops->set_cqm_rssi_config) {
4824 err = -EOPNOTSUPP;
4825 goto unlock_rdev;
4826 }
4827
4828 if (wdev->iftype != NL80211_IFTYPE_STATION) {
4829 err = -EOPNOTSUPP;
4830 goto unlock_rdev;
4831 }
4832
4833 err = rdev->ops->set_cqm_rssi_config(wdev->wiphy, dev,
4834 threshold, hysteresis);
4835
4836unlock_rdev:
4837 cfg80211_unlock_rdev(rdev);
4838 dev_put(dev);
4839 rtnl_unlock();
4840
4841 return err;
4842}
4843
4844static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info)
4845{
4846 struct nlattr *attrs[NL80211_ATTR_CQM_MAX + 1];
4847 struct nlattr *cqm;
4848 int err;
4849
4850 cqm = info->attrs[NL80211_ATTR_CQM];
4851 if (!cqm) {
4852 err = -EINVAL;
4853 goto out;
4854 }
4855
4856 err = nla_parse_nested(attrs, NL80211_ATTR_CQM_MAX, cqm,
4857 nl80211_attr_cqm_policy);
4858 if (err)
4859 goto out;
4860
4861 if (attrs[NL80211_ATTR_CQM_RSSI_THOLD] &&
4862 attrs[NL80211_ATTR_CQM_RSSI_HYST]) {
4863 s32 threshold;
4864 u32 hysteresis;
4865 threshold = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]);
4866 hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]);
4867 err = nl80211_set_cqm_rssi(info, threshold, hysteresis);
4868 } else
4869 err = -EINVAL;
4870
4871out:
4872 return err;
4873}
4874
4781static struct genl_ops nl80211_ops[] = { 4875static struct genl_ops nl80211_ops[] = {
4782 { 4876 {
4783 .cmd = NL80211_CMD_GET_WIPHY, 4877 .cmd = NL80211_CMD_GET_WIPHY,
@@ -5082,6 +5176,12 @@ static struct genl_ops nl80211_ops[] = {
5082 .policy = nl80211_policy, 5176 .policy = nl80211_policy,
5083 /* can be retrieved by unprivileged users */ 5177 /* can be retrieved by unprivileged users */
5084 }, 5178 },
5179 {
5180 .cmd = NL80211_CMD_SET_CQM,
5181 .doit = nl80211_set_cqm,
5182 .policy = nl80211_policy,
5183 .flags = GENL_ADMIN_PERM,
5184 },
5085}; 5185};
5086 5186
5087static struct genl_multicast_group nl80211_mlme_mcgrp = { 5187static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -5832,6 +5932,52 @@ void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
5832 nlmsg_free(msg); 5932 nlmsg_free(msg);
5833} 5933}
5834 5934
5935void
5936nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
5937 struct net_device *netdev,
5938 enum nl80211_cqm_rssi_threshold_event rssi_event,
5939 gfp_t gfp)
5940{
5941 struct sk_buff *msg;
5942 struct nlattr *pinfoattr;
5943 void *hdr;
5944
5945 msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
5946 if (!msg)
5947 return;
5948
5949 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM);
5950 if (!hdr) {
5951 nlmsg_free(msg);
5952 return;
5953 }
5954
5955 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
5956 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
5957
5958 pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
5959 if (!pinfoattr)
5960 goto nla_put_failure;
5961
5962 NLA_PUT_U32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
5963 rssi_event);
5964
5965 nla_nest_end(msg, pinfoattr);
5966
5967 if (genlmsg_end(msg, hdr) < 0) {
5968 nlmsg_free(msg);
5969 return;
5970 }
5971
5972 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
5973 nl80211_mlme_mcgrp.id, gfp);
5974 return;
5975
5976 nla_put_failure:
5977 genlmsg_cancel(msg, hdr);
5978 nlmsg_free(msg);
5979}
5980
5835static int nl80211_netlink_notify(struct notifier_block * nb, 5981static int nl80211_netlink_notify(struct notifier_block * nb,
5836 unsigned long state, 5982 unsigned long state,
5837 void *_notify) 5983 void *_notify)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 4ca511102c6c..2ad7fbc7d9f1 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -82,4 +82,10 @@ void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
82 const u8 *buf, size_t len, bool ack, 82 const u8 *buf, size_t len, bool ack,
83 gfp_t gfp); 83 gfp_t gfp);
84 84
85void
86nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
87 struct net_device *netdev,
88 enum nl80211_cqm_rssi_threshold_event rssi_event,
89 gfp_t gfp);
90
85#endif /* __NET_WIRELESS_NL80211_H */ 91#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index ed89c59bb431..8f0d97dd3109 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -33,6 +33,7 @@
33 * 33 *
34 */ 34 */
35#include <linux/kernel.h> 35#include <linux/kernel.h>
36#include <linux/slab.h>
36#include <linux/list.h> 37#include <linux/list.h>
37#include <linux/random.h> 38#include <linux/random.h>
38#include <linux/nl80211.h> 39#include <linux/nl80211.h>
@@ -324,7 +325,7 @@ struct reg_regdb_search_request {
324}; 325};
325 326
326static LIST_HEAD(reg_regdb_search_list); 327static LIST_HEAD(reg_regdb_search_list);
327static DEFINE_SPINLOCK(reg_regdb_search_lock); 328static DEFINE_MUTEX(reg_regdb_search_mutex);
328 329
329static void reg_regdb_search(struct work_struct *work) 330static void reg_regdb_search(struct work_struct *work)
330{ 331{
@@ -332,7 +333,7 @@ static void reg_regdb_search(struct work_struct *work)
332 const struct ieee80211_regdomain *curdom, *regdom; 333 const struct ieee80211_regdomain *curdom, *regdom;
333 int i, r; 334 int i, r;
334 335
335 spin_lock(&reg_regdb_search_lock); 336 mutex_lock(&reg_regdb_search_mutex);
336 while (!list_empty(&reg_regdb_search_list)) { 337 while (!list_empty(&reg_regdb_search_list)) {
337 request = list_first_entry(&reg_regdb_search_list, 338 request = list_first_entry(&reg_regdb_search_list,
338 struct reg_regdb_search_request, 339 struct reg_regdb_search_request,
@@ -346,18 +347,16 @@ static void reg_regdb_search(struct work_struct *work)
346 r = reg_copy_regd(&regdom, curdom); 347 r = reg_copy_regd(&regdom, curdom);
347 if (r) 348 if (r)
348 break; 349 break;
349 spin_unlock(&reg_regdb_search_lock);
350 mutex_lock(&cfg80211_mutex); 350 mutex_lock(&cfg80211_mutex);
351 set_regdom(regdom); 351 set_regdom(regdom);
352 mutex_unlock(&cfg80211_mutex); 352 mutex_unlock(&cfg80211_mutex);
353 spin_lock(&reg_regdb_search_lock);
354 break; 353 break;
355 } 354 }
356 } 355 }
357 356
358 kfree(request); 357 kfree(request);
359 } 358 }
360 spin_unlock(&reg_regdb_search_lock); 359 mutex_unlock(&reg_regdb_search_mutex);
361} 360}
362 361
363static DECLARE_WORK(reg_regdb_work, reg_regdb_search); 362static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
@@ -375,9 +374,9 @@ static void reg_regdb_query(const char *alpha2)
375 374
376 memcpy(request->alpha2, alpha2, 2); 375 memcpy(request->alpha2, alpha2, 2);
377 376
378 spin_lock(&reg_regdb_search_lock); 377 mutex_lock(&reg_regdb_search_mutex);
379 list_add_tail(&request->list, &reg_regdb_search_list); 378 list_add_tail(&request->list, &reg_regdb_search_list);
380 spin_unlock(&reg_regdb_search_lock); 379 mutex_unlock(&reg_regdb_search_mutex);
381 380
382 schedule_work(&reg_regdb_work); 381 schedule_work(&reg_regdb_work);
383} 382}
@@ -2357,10 +2356,10 @@ static void print_regdomain(const struct ieee80211_regdomain *rd)
2357 rdev->country_ie_alpha2[1]); 2356 rdev->country_ie_alpha2[1]);
2358 } else 2357 } else
2359 printk(KERN_INFO "cfg80211: Current regulatory " 2358 printk(KERN_INFO "cfg80211: Current regulatory "
2360 "domain intersected: \n"); 2359 "domain intersected:\n");
2361 } else 2360 } else
2362 printk(KERN_INFO "cfg80211: Current regulatory " 2361 printk(KERN_INFO "cfg80211: Current regulatory "
2363 "domain intersected: \n"); 2362 "domain intersected:\n");
2364 } else if (is_world_regdom(rd->alpha2)) 2363 } else if (is_world_regdom(rd->alpha2))
2365 printk(KERN_INFO "cfg80211: World regulatory " 2364 printk(KERN_INFO "cfg80211: World regulatory "
2366 "domain updated:\n"); 2365 "domain updated:\n");
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 978cac3414b5..a026c6d56bd3 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -4,6 +4,7 @@
4 * Copyright 2008 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/slab.h>
7#include <linux/module.h> 8#include <linux/module.h>
8#include <linux/netdevice.h> 9#include <linux/netdevice.h>
9#include <linux/wireless.h> 10#include <linux/wireless.h>
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 17fde0da1b08..c2735775ec19 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -7,6 +7,7 @@
7 7
8#include <linux/etherdevice.h> 8#include <linux/etherdevice.h>
9#include <linux/if_arp.h> 9#include <linux/if_arp.h>
10#include <linux/slab.h>
10#include <linux/workqueue.h> 11#include <linux/workqueue.h>
11#include <linux/wireless.h> 12#include <linux/wireless.h>
12#include <net/iw_handler.h> 13#include <net/iw_handler.h>
@@ -170,7 +171,7 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
170 params->ssid, params->ssid_len, 171 params->ssid, params->ssid_len,
171 NULL, 0, 172 NULL, 0,
172 params->key, params->key_len, 173 params->key, params->key_len,
173 params->key_idx); 174 params->key_idx, false);
174 case CFG80211_CONN_ASSOCIATE_NEXT: 175 case CFG80211_CONN_ASSOCIATE_NEXT:
175 BUG_ON(!rdev->ops->assoc); 176 BUG_ON(!rdev->ops->assoc);
176 wdev->conn->state = CFG80211_CONN_ASSOCIATING; 177 wdev->conn->state = CFG80211_CONN_ASSOCIATING;
@@ -185,12 +186,13 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
185 if (err) 186 if (err)
186 __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, 187 __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
187 NULL, 0, 188 NULL, 0,
188 WLAN_REASON_DEAUTH_LEAVING); 189 WLAN_REASON_DEAUTH_LEAVING,
190 false);
189 return err; 191 return err;
190 case CFG80211_CONN_DEAUTH_ASSOC_FAIL: 192 case CFG80211_CONN_DEAUTH_ASSOC_FAIL:
191 __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, 193 __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
192 NULL, 0, 194 NULL, 0,
193 WLAN_REASON_DEAUTH_LEAVING); 195 WLAN_REASON_DEAUTH_LEAVING, false);
194 /* return an error so that we call __cfg80211_connect_result() */ 196 /* return an error so that we call __cfg80211_connect_result() */
195 return -EINVAL; 197 return -EINVAL;
196 default: 198 default:
@@ -675,7 +677,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
675 continue; 677 continue;
676 bssid = wdev->auth_bsses[i]->pub.bssid; 678 bssid = wdev->auth_bsses[i]->pub.bssid;
677 ret = __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0, 679 ret = __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0,
678 WLAN_REASON_DEAUTH_LEAVING); 680 WLAN_REASON_DEAUTH_LEAVING,
681 false);
679 WARN(ret, "deauth failed: %d\n", ret); 682 WARN(ret, "deauth failed: %d\n", ret);
680 } 683 }
681 } 684 }
@@ -934,7 +937,7 @@ int __cfg80211_disconnect(struct cfg80211_registered_device *rdev,
934 /* wdev->conn->params.bssid must be set if > SCANNING */ 937 /* wdev->conn->params.bssid must be set if > SCANNING */
935 err = __cfg80211_mlme_deauth(rdev, dev, 938 err = __cfg80211_mlme_deauth(rdev, dev,
936 wdev->conn->params.bssid, 939 wdev->conn->params.bssid,
937 NULL, 0, reason); 940 NULL, 0, reason, false);
938 if (err) 941 if (err)
939 return err; 942 return err;
940 } else { 943 } else {
@@ -990,7 +993,8 @@ void cfg80211_sme_disassoc(struct net_device *dev, int idx)
990 993
991 memcpy(bssid, wdev->auth_bsses[idx]->pub.bssid, ETH_ALEN); 994 memcpy(bssid, wdev->auth_bsses[idx]->pub.bssid, ETH_ALEN);
992 if (__cfg80211_mlme_deauth(rdev, dev, bssid, 995 if (__cfg80211_mlme_deauth(rdev, dev, bssid,
993 NULL, 0, WLAN_REASON_DEAUTH_LEAVING)) { 996 NULL, 0, WLAN_REASON_DEAUTH_LEAVING,
997 false)) {
994 /* whatever -- assume gone anyway */ 998 /* whatever -- assume gone anyway */
995 cfg80211_unhold_bss(wdev->auth_bsses[idx]); 999 cfg80211_unhold_bss(wdev->auth_bsses[idx]);
996 cfg80211_put_bss(&wdev->auth_bsses[idx]->pub); 1000 cfg80211_put_bss(&wdev->auth_bsses[idx]->pub);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index be2ab8c59e3a..3416373a9c0c 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -5,6 +5,7 @@
5 */ 5 */
6#include <linux/bitops.h> 6#include <linux/bitops.h>
7#include <linux/etherdevice.h> 7#include <linux/etherdevice.h>
8#include <linux/slab.h>
8#include <net/cfg80211.h> 9#include <net/cfg80211.h>
9#include <net/ip.h> 10#include <net/ip.h>
10#include "core.h" 11#include "core.h"
@@ -330,11 +331,18 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
330 if (iftype == NL80211_IFTYPE_MESH_POINT) { 331 if (iftype == NL80211_IFTYPE_MESH_POINT) {
331 struct ieee80211s_hdr *meshdr = 332 struct ieee80211s_hdr *meshdr =
332 (struct ieee80211s_hdr *) (skb->data + hdrlen); 333 (struct ieee80211s_hdr *) (skb->data + hdrlen);
333 hdrlen += ieee80211_get_mesh_hdrlen(meshdr); 334 /* make sure meshdr->flags is on the linear part */
335 if (!pskb_may_pull(skb, hdrlen + 1))
336 return -1;
334 if (meshdr->flags & MESH_FLAGS_AE_A5_A6) { 337 if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
335 memcpy(dst, meshdr->eaddr1, ETH_ALEN); 338 skb_copy_bits(skb, hdrlen +
336 memcpy(src, meshdr->eaddr2, ETH_ALEN); 339 offsetof(struct ieee80211s_hdr, eaddr1),
340 dst, ETH_ALEN);
341 skb_copy_bits(skb, hdrlen +
342 offsetof(struct ieee80211s_hdr, eaddr2),
343 src, ETH_ALEN);
337 } 344 }
345 hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
338 } 346 }
339 break; 347 break;
340 case cpu_to_le16(IEEE80211_FCTL_FROMDS): 348 case cpu_to_le16(IEEE80211_FCTL_FROMDS):
@@ -346,9 +354,14 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
346 if (iftype == NL80211_IFTYPE_MESH_POINT) { 354 if (iftype == NL80211_IFTYPE_MESH_POINT) {
347 struct ieee80211s_hdr *meshdr = 355 struct ieee80211s_hdr *meshdr =
348 (struct ieee80211s_hdr *) (skb->data + hdrlen); 356 (struct ieee80211s_hdr *) (skb->data + hdrlen);
349 hdrlen += ieee80211_get_mesh_hdrlen(meshdr); 357 /* make sure meshdr->flags is on the linear part */
358 if (!pskb_may_pull(skb, hdrlen + 1))
359 return -1;
350 if (meshdr->flags & MESH_FLAGS_AE_A4) 360 if (meshdr->flags & MESH_FLAGS_AE_A4)
351 memcpy(src, meshdr->eaddr1, ETH_ALEN); 361 skb_copy_bits(skb, hdrlen +
362 offsetof(struct ieee80211s_hdr, eaddr1),
363 src, ETH_ALEN);
364 hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
352 } 365 }
353 break; 366 break;
354 case cpu_to_le16(0): 367 case cpu_to_le16(0):
@@ -357,7 +370,7 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
357 break; 370 break;
358 } 371 }
359 372
360 if (unlikely(skb->len - hdrlen < 8)) 373 if (!pskb_may_pull(skb, hdrlen + 8))
361 return -1; 374 return -1;
362 375
363 payload = skb->data + hdrlen; 376 payload = skb->data + hdrlen;
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 9ab51838849e..a60a2773b497 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -12,6 +12,7 @@
12#include <linux/nl80211.h> 12#include <linux/nl80211.h>
13#include <linux/if_arp.h> 13#include <linux/if_arp.h>
14#include <linux/etherdevice.h> 14#include <linux/etherdevice.h>
15#include <linux/slab.h>
15#include <net/iw_handler.h> 16#include <net/iw_handler.h>
16#include <net/cfg80211.h> 17#include <net/cfg80211.h>
17#include "wext-compat.h" 18#include "wext-compat.h"
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 5e1656bdf23b..0ef17bc42bac 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -10,6 +10,7 @@
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/netdevice.h> 11#include <linux/netdevice.h>
12#include <linux/rtnetlink.h> 12#include <linux/rtnetlink.h>
13#include <linux/slab.h>
13#include <linux/wireless.h> 14#include <linux/wireless.h>
14#include <linux/uaccess.h> 15#include <linux/uaccess.h>
15#include <net/cfg80211.h> 16#include <net/cfg80211.h>
@@ -28,226 +29,226 @@ typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *,
28 * know about. 29 * know about.
29 */ 30 */
30static const struct iw_ioctl_description standard_ioctl[] = { 31static const struct iw_ioctl_description standard_ioctl[] = {
31 [SIOCSIWCOMMIT - SIOCIWFIRST] = { 32 [IW_IOCTL_IDX(SIOCSIWCOMMIT)] = {
32 .header_type = IW_HEADER_TYPE_NULL, 33 .header_type = IW_HEADER_TYPE_NULL,
33 }, 34 },
34 [SIOCGIWNAME - SIOCIWFIRST] = { 35 [IW_IOCTL_IDX(SIOCGIWNAME)] = {
35 .header_type = IW_HEADER_TYPE_CHAR, 36 .header_type = IW_HEADER_TYPE_CHAR,
36 .flags = IW_DESCR_FLAG_DUMP, 37 .flags = IW_DESCR_FLAG_DUMP,
37 }, 38 },
38 [SIOCSIWNWID - SIOCIWFIRST] = { 39 [IW_IOCTL_IDX(SIOCSIWNWID)] = {
39 .header_type = IW_HEADER_TYPE_PARAM, 40 .header_type = IW_HEADER_TYPE_PARAM,
40 .flags = IW_DESCR_FLAG_EVENT, 41 .flags = IW_DESCR_FLAG_EVENT,
41 }, 42 },
42 [SIOCGIWNWID - SIOCIWFIRST] = { 43 [IW_IOCTL_IDX(SIOCGIWNWID)] = {
43 .header_type = IW_HEADER_TYPE_PARAM, 44 .header_type = IW_HEADER_TYPE_PARAM,
44 .flags = IW_DESCR_FLAG_DUMP, 45 .flags = IW_DESCR_FLAG_DUMP,
45 }, 46 },
46 [SIOCSIWFREQ - SIOCIWFIRST] = { 47 [IW_IOCTL_IDX(SIOCSIWFREQ)] = {
47 .header_type = IW_HEADER_TYPE_FREQ, 48 .header_type = IW_HEADER_TYPE_FREQ,
48 .flags = IW_DESCR_FLAG_EVENT, 49 .flags = IW_DESCR_FLAG_EVENT,
49 }, 50 },
50 [SIOCGIWFREQ - SIOCIWFIRST] = { 51 [IW_IOCTL_IDX(SIOCGIWFREQ)] = {
51 .header_type = IW_HEADER_TYPE_FREQ, 52 .header_type = IW_HEADER_TYPE_FREQ,
52 .flags = IW_DESCR_FLAG_DUMP, 53 .flags = IW_DESCR_FLAG_DUMP,
53 }, 54 },
54 [SIOCSIWMODE - SIOCIWFIRST] = { 55 [IW_IOCTL_IDX(SIOCSIWMODE)] = {
55 .header_type = IW_HEADER_TYPE_UINT, 56 .header_type = IW_HEADER_TYPE_UINT,
56 .flags = IW_DESCR_FLAG_EVENT, 57 .flags = IW_DESCR_FLAG_EVENT,
57 }, 58 },
58 [SIOCGIWMODE - SIOCIWFIRST] = { 59 [IW_IOCTL_IDX(SIOCGIWMODE)] = {
59 .header_type = IW_HEADER_TYPE_UINT, 60 .header_type = IW_HEADER_TYPE_UINT,
60 .flags = IW_DESCR_FLAG_DUMP, 61 .flags = IW_DESCR_FLAG_DUMP,
61 }, 62 },
62 [SIOCSIWSENS - SIOCIWFIRST] = { 63 [IW_IOCTL_IDX(SIOCSIWSENS)] = {
63 .header_type = IW_HEADER_TYPE_PARAM, 64 .header_type = IW_HEADER_TYPE_PARAM,
64 }, 65 },
65 [SIOCGIWSENS - SIOCIWFIRST] = { 66 [IW_IOCTL_IDX(SIOCGIWSENS)] = {
66 .header_type = IW_HEADER_TYPE_PARAM, 67 .header_type = IW_HEADER_TYPE_PARAM,
67 }, 68 },
68 [SIOCSIWRANGE - SIOCIWFIRST] = { 69 [IW_IOCTL_IDX(SIOCSIWRANGE)] = {
69 .header_type = IW_HEADER_TYPE_NULL, 70 .header_type = IW_HEADER_TYPE_NULL,
70 }, 71 },
71 [SIOCGIWRANGE - SIOCIWFIRST] = { 72 [IW_IOCTL_IDX(SIOCGIWRANGE)] = {
72 .header_type = IW_HEADER_TYPE_POINT, 73 .header_type = IW_HEADER_TYPE_POINT,
73 .token_size = 1, 74 .token_size = 1,
74 .max_tokens = sizeof(struct iw_range), 75 .max_tokens = sizeof(struct iw_range),
75 .flags = IW_DESCR_FLAG_DUMP, 76 .flags = IW_DESCR_FLAG_DUMP,
76 }, 77 },
77 [SIOCSIWPRIV - SIOCIWFIRST] = { 78 [IW_IOCTL_IDX(SIOCSIWPRIV)] = {
78 .header_type = IW_HEADER_TYPE_NULL, 79 .header_type = IW_HEADER_TYPE_NULL,
79 }, 80 },
80 [SIOCGIWPRIV - SIOCIWFIRST] = { /* (handled directly by us) */ 81 [IW_IOCTL_IDX(SIOCGIWPRIV)] = { /* (handled directly by us) */
81 .header_type = IW_HEADER_TYPE_POINT, 82 .header_type = IW_HEADER_TYPE_POINT,
82 .token_size = sizeof(struct iw_priv_args), 83 .token_size = sizeof(struct iw_priv_args),
83 .max_tokens = 16, 84 .max_tokens = 16,
84 .flags = IW_DESCR_FLAG_NOMAX, 85 .flags = IW_DESCR_FLAG_NOMAX,
85 }, 86 },
86 [SIOCSIWSTATS - SIOCIWFIRST] = { 87 [IW_IOCTL_IDX(SIOCSIWSTATS)] = {
87 .header_type = IW_HEADER_TYPE_NULL, 88 .header_type = IW_HEADER_TYPE_NULL,
88 }, 89 },
89 [SIOCGIWSTATS - SIOCIWFIRST] = { /* (handled directly by us) */ 90 [IW_IOCTL_IDX(SIOCGIWSTATS)] = { /* (handled directly by us) */
90 .header_type = IW_HEADER_TYPE_POINT, 91 .header_type = IW_HEADER_TYPE_POINT,
91 .token_size = 1, 92 .token_size = 1,
92 .max_tokens = sizeof(struct iw_statistics), 93 .max_tokens = sizeof(struct iw_statistics),
93 .flags = IW_DESCR_FLAG_DUMP, 94 .flags = IW_DESCR_FLAG_DUMP,
94 }, 95 },
95 [SIOCSIWSPY - SIOCIWFIRST] = { 96 [IW_IOCTL_IDX(SIOCSIWSPY)] = {
96 .header_type = IW_HEADER_TYPE_POINT, 97 .header_type = IW_HEADER_TYPE_POINT,
97 .token_size = sizeof(struct sockaddr), 98 .token_size = sizeof(struct sockaddr),
98 .max_tokens = IW_MAX_SPY, 99 .max_tokens = IW_MAX_SPY,
99 }, 100 },
100 [SIOCGIWSPY - SIOCIWFIRST] = { 101 [IW_IOCTL_IDX(SIOCGIWSPY)] = {
101 .header_type = IW_HEADER_TYPE_POINT, 102 .header_type = IW_HEADER_TYPE_POINT,
102 .token_size = sizeof(struct sockaddr) + 103 .token_size = sizeof(struct sockaddr) +
103 sizeof(struct iw_quality), 104 sizeof(struct iw_quality),
104 .max_tokens = IW_MAX_SPY, 105 .max_tokens = IW_MAX_SPY,
105 }, 106 },
106 [SIOCSIWTHRSPY - SIOCIWFIRST] = { 107 [IW_IOCTL_IDX(SIOCSIWTHRSPY)] = {
107 .header_type = IW_HEADER_TYPE_POINT, 108 .header_type = IW_HEADER_TYPE_POINT,
108 .token_size = sizeof(struct iw_thrspy), 109 .token_size = sizeof(struct iw_thrspy),
109 .min_tokens = 1, 110 .min_tokens = 1,
110 .max_tokens = 1, 111 .max_tokens = 1,
111 }, 112 },
112 [SIOCGIWTHRSPY - SIOCIWFIRST] = { 113 [IW_IOCTL_IDX(SIOCGIWTHRSPY)] = {
113 .header_type = IW_HEADER_TYPE_POINT, 114 .header_type = IW_HEADER_TYPE_POINT,
114 .token_size = sizeof(struct iw_thrspy), 115 .token_size = sizeof(struct iw_thrspy),
115 .min_tokens = 1, 116 .min_tokens = 1,
116 .max_tokens = 1, 117 .max_tokens = 1,
117 }, 118 },
118 [SIOCSIWAP - SIOCIWFIRST] = { 119 [IW_IOCTL_IDX(SIOCSIWAP)] = {
119 .header_type = IW_HEADER_TYPE_ADDR, 120 .header_type = IW_HEADER_TYPE_ADDR,
120 }, 121 },
121 [SIOCGIWAP - SIOCIWFIRST] = { 122 [IW_IOCTL_IDX(SIOCGIWAP)] = {
122 .header_type = IW_HEADER_TYPE_ADDR, 123 .header_type = IW_HEADER_TYPE_ADDR,
123 .flags = IW_DESCR_FLAG_DUMP, 124 .flags = IW_DESCR_FLAG_DUMP,
124 }, 125 },
125 [SIOCSIWMLME - SIOCIWFIRST] = { 126 [IW_IOCTL_IDX(SIOCSIWMLME)] = {
126 .header_type = IW_HEADER_TYPE_POINT, 127 .header_type = IW_HEADER_TYPE_POINT,
127 .token_size = 1, 128 .token_size = 1,
128 .min_tokens = sizeof(struct iw_mlme), 129 .min_tokens = sizeof(struct iw_mlme),
129 .max_tokens = sizeof(struct iw_mlme), 130 .max_tokens = sizeof(struct iw_mlme),
130 }, 131 },
131 [SIOCGIWAPLIST - SIOCIWFIRST] = { 132 [IW_IOCTL_IDX(SIOCGIWAPLIST)] = {
132 .header_type = IW_HEADER_TYPE_POINT, 133 .header_type = IW_HEADER_TYPE_POINT,
133 .token_size = sizeof(struct sockaddr) + 134 .token_size = sizeof(struct sockaddr) +
134 sizeof(struct iw_quality), 135 sizeof(struct iw_quality),
135 .max_tokens = IW_MAX_AP, 136 .max_tokens = IW_MAX_AP,
136 .flags = IW_DESCR_FLAG_NOMAX, 137 .flags = IW_DESCR_FLAG_NOMAX,
137 }, 138 },
138 [SIOCSIWSCAN - SIOCIWFIRST] = { 139 [IW_IOCTL_IDX(SIOCSIWSCAN)] = {
139 .header_type = IW_HEADER_TYPE_POINT, 140 .header_type = IW_HEADER_TYPE_POINT,
140 .token_size = 1, 141 .token_size = 1,
141 .min_tokens = 0, 142 .min_tokens = 0,
142 .max_tokens = sizeof(struct iw_scan_req), 143 .max_tokens = sizeof(struct iw_scan_req),
143 }, 144 },
144 [SIOCGIWSCAN - SIOCIWFIRST] = { 145 [IW_IOCTL_IDX(SIOCGIWSCAN)] = {
145 .header_type = IW_HEADER_TYPE_POINT, 146 .header_type = IW_HEADER_TYPE_POINT,
146 .token_size = 1, 147 .token_size = 1,
147 .max_tokens = IW_SCAN_MAX_DATA, 148 .max_tokens = IW_SCAN_MAX_DATA,
148 .flags = IW_DESCR_FLAG_NOMAX, 149 .flags = IW_DESCR_FLAG_NOMAX,
149 }, 150 },
150 [SIOCSIWESSID - SIOCIWFIRST] = { 151 [IW_IOCTL_IDX(SIOCSIWESSID)] = {
151 .header_type = IW_HEADER_TYPE_POINT, 152 .header_type = IW_HEADER_TYPE_POINT,
152 .token_size = 1, 153 .token_size = 1,
153 .max_tokens = IW_ESSID_MAX_SIZE, 154 .max_tokens = IW_ESSID_MAX_SIZE,
154 .flags = IW_DESCR_FLAG_EVENT, 155 .flags = IW_DESCR_FLAG_EVENT,
155 }, 156 },
156 [SIOCGIWESSID - SIOCIWFIRST] = { 157 [IW_IOCTL_IDX(SIOCGIWESSID)] = {
157 .header_type = IW_HEADER_TYPE_POINT, 158 .header_type = IW_HEADER_TYPE_POINT,
158 .token_size = 1, 159 .token_size = 1,
159 .max_tokens = IW_ESSID_MAX_SIZE, 160 .max_tokens = IW_ESSID_MAX_SIZE,
160 .flags = IW_DESCR_FLAG_DUMP, 161 .flags = IW_DESCR_FLAG_DUMP,
161 }, 162 },
162 [SIOCSIWNICKN - SIOCIWFIRST] = { 163 [IW_IOCTL_IDX(SIOCSIWNICKN)] = {
163 .header_type = IW_HEADER_TYPE_POINT, 164 .header_type = IW_HEADER_TYPE_POINT,
164 .token_size = 1, 165 .token_size = 1,
165 .max_tokens = IW_ESSID_MAX_SIZE, 166 .max_tokens = IW_ESSID_MAX_SIZE,
166 }, 167 },
167 [SIOCGIWNICKN - SIOCIWFIRST] = { 168 [IW_IOCTL_IDX(SIOCGIWNICKN)] = {
168 .header_type = IW_HEADER_TYPE_POINT, 169 .header_type = IW_HEADER_TYPE_POINT,
169 .token_size = 1, 170 .token_size = 1,
170 .max_tokens = IW_ESSID_MAX_SIZE, 171 .max_tokens = IW_ESSID_MAX_SIZE,
171 }, 172 },
172 [SIOCSIWRATE - SIOCIWFIRST] = { 173 [IW_IOCTL_IDX(SIOCSIWRATE)] = {
173 .header_type = IW_HEADER_TYPE_PARAM, 174 .header_type = IW_HEADER_TYPE_PARAM,
174 }, 175 },
175 [SIOCGIWRATE - SIOCIWFIRST] = { 176 [IW_IOCTL_IDX(SIOCGIWRATE)] = {
176 .header_type = IW_HEADER_TYPE_PARAM, 177 .header_type = IW_HEADER_TYPE_PARAM,
177 }, 178 },
178 [SIOCSIWRTS - SIOCIWFIRST] = { 179 [IW_IOCTL_IDX(SIOCSIWRTS)] = {
179 .header_type = IW_HEADER_TYPE_PARAM, 180 .header_type = IW_HEADER_TYPE_PARAM,
180 }, 181 },
181 [SIOCGIWRTS - SIOCIWFIRST] = { 182 [IW_IOCTL_IDX(SIOCGIWRTS)] = {
182 .header_type = IW_HEADER_TYPE_PARAM, 183 .header_type = IW_HEADER_TYPE_PARAM,
183 }, 184 },
184 [SIOCSIWFRAG - SIOCIWFIRST] = { 185 [IW_IOCTL_IDX(SIOCSIWFRAG)] = {
185 .header_type = IW_HEADER_TYPE_PARAM, 186 .header_type = IW_HEADER_TYPE_PARAM,
186 }, 187 },
187 [SIOCGIWFRAG - SIOCIWFIRST] = { 188 [IW_IOCTL_IDX(SIOCGIWFRAG)] = {
188 .header_type = IW_HEADER_TYPE_PARAM, 189 .header_type = IW_HEADER_TYPE_PARAM,
189 }, 190 },
190 [SIOCSIWTXPOW - SIOCIWFIRST] = { 191 [IW_IOCTL_IDX(SIOCSIWTXPOW)] = {
191 .header_type = IW_HEADER_TYPE_PARAM, 192 .header_type = IW_HEADER_TYPE_PARAM,
192 }, 193 },
193 [SIOCGIWTXPOW - SIOCIWFIRST] = { 194 [IW_IOCTL_IDX(SIOCGIWTXPOW)] = {
194 .header_type = IW_HEADER_TYPE_PARAM, 195 .header_type = IW_HEADER_TYPE_PARAM,
195 }, 196 },
196 [SIOCSIWRETRY - SIOCIWFIRST] = { 197 [IW_IOCTL_IDX(SIOCSIWRETRY)] = {
197 .header_type = IW_HEADER_TYPE_PARAM, 198 .header_type = IW_HEADER_TYPE_PARAM,
198 }, 199 },
199 [SIOCGIWRETRY - SIOCIWFIRST] = { 200 [IW_IOCTL_IDX(SIOCGIWRETRY)] = {
200 .header_type = IW_HEADER_TYPE_PARAM, 201 .header_type = IW_HEADER_TYPE_PARAM,
201 }, 202 },
202 [SIOCSIWENCODE - SIOCIWFIRST] = { 203 [IW_IOCTL_IDX(SIOCSIWENCODE)] = {
203 .header_type = IW_HEADER_TYPE_POINT, 204 .header_type = IW_HEADER_TYPE_POINT,
204 .token_size = 1, 205 .token_size = 1,
205 .max_tokens = IW_ENCODING_TOKEN_MAX, 206 .max_tokens = IW_ENCODING_TOKEN_MAX,
206 .flags = IW_DESCR_FLAG_EVENT | IW_DESCR_FLAG_RESTRICT, 207 .flags = IW_DESCR_FLAG_EVENT | IW_DESCR_FLAG_RESTRICT,
207 }, 208 },
208 [SIOCGIWENCODE - SIOCIWFIRST] = { 209 [IW_IOCTL_IDX(SIOCGIWENCODE)] = {
209 .header_type = IW_HEADER_TYPE_POINT, 210 .header_type = IW_HEADER_TYPE_POINT,
210 .token_size = 1, 211 .token_size = 1,
211 .max_tokens = IW_ENCODING_TOKEN_MAX, 212 .max_tokens = IW_ENCODING_TOKEN_MAX,
212 .flags = IW_DESCR_FLAG_DUMP | IW_DESCR_FLAG_RESTRICT, 213 .flags = IW_DESCR_FLAG_DUMP | IW_DESCR_FLAG_RESTRICT,
213 }, 214 },
214 [SIOCSIWPOWER - SIOCIWFIRST] = { 215 [IW_IOCTL_IDX(SIOCSIWPOWER)] = {
215 .header_type = IW_HEADER_TYPE_PARAM, 216 .header_type = IW_HEADER_TYPE_PARAM,
216 }, 217 },
217 [SIOCGIWPOWER - SIOCIWFIRST] = { 218 [IW_IOCTL_IDX(SIOCGIWPOWER)] = {
218 .header_type = IW_HEADER_TYPE_PARAM, 219 .header_type = IW_HEADER_TYPE_PARAM,
219 }, 220 },
220 [SIOCSIWGENIE - SIOCIWFIRST] = { 221 [IW_IOCTL_IDX(SIOCSIWGENIE)] = {
221 .header_type = IW_HEADER_TYPE_POINT, 222 .header_type = IW_HEADER_TYPE_POINT,
222 .token_size = 1, 223 .token_size = 1,
223 .max_tokens = IW_GENERIC_IE_MAX, 224 .max_tokens = IW_GENERIC_IE_MAX,
224 }, 225 },
225 [SIOCGIWGENIE - SIOCIWFIRST] = { 226 [IW_IOCTL_IDX(SIOCGIWGENIE)] = {
226 .header_type = IW_HEADER_TYPE_POINT, 227 .header_type = IW_HEADER_TYPE_POINT,
227 .token_size = 1, 228 .token_size = 1,
228 .max_tokens = IW_GENERIC_IE_MAX, 229 .max_tokens = IW_GENERIC_IE_MAX,
229 }, 230 },
230 [SIOCSIWAUTH - SIOCIWFIRST] = { 231 [IW_IOCTL_IDX(SIOCSIWAUTH)] = {
231 .header_type = IW_HEADER_TYPE_PARAM, 232 .header_type = IW_HEADER_TYPE_PARAM,
232 }, 233 },
233 [SIOCGIWAUTH - SIOCIWFIRST] = { 234 [IW_IOCTL_IDX(SIOCGIWAUTH)] = {
234 .header_type = IW_HEADER_TYPE_PARAM, 235 .header_type = IW_HEADER_TYPE_PARAM,
235 }, 236 },
236 [SIOCSIWENCODEEXT - SIOCIWFIRST] = { 237 [IW_IOCTL_IDX(SIOCSIWENCODEEXT)] = {
237 .header_type = IW_HEADER_TYPE_POINT, 238 .header_type = IW_HEADER_TYPE_POINT,
238 .token_size = 1, 239 .token_size = 1,
239 .min_tokens = sizeof(struct iw_encode_ext), 240 .min_tokens = sizeof(struct iw_encode_ext),
240 .max_tokens = sizeof(struct iw_encode_ext) + 241 .max_tokens = sizeof(struct iw_encode_ext) +
241 IW_ENCODING_TOKEN_MAX, 242 IW_ENCODING_TOKEN_MAX,
242 }, 243 },
243 [SIOCGIWENCODEEXT - SIOCIWFIRST] = { 244 [IW_IOCTL_IDX(SIOCGIWENCODEEXT)] = {
244 .header_type = IW_HEADER_TYPE_POINT, 245 .header_type = IW_HEADER_TYPE_POINT,
245 .token_size = 1, 246 .token_size = 1,
246 .min_tokens = sizeof(struct iw_encode_ext), 247 .min_tokens = sizeof(struct iw_encode_ext),
247 .max_tokens = sizeof(struct iw_encode_ext) + 248 .max_tokens = sizeof(struct iw_encode_ext) +
248 IW_ENCODING_TOKEN_MAX, 249 IW_ENCODING_TOKEN_MAX,
249 }, 250 },
250 [SIOCSIWPMKSA - SIOCIWFIRST] = { 251 [IW_IOCTL_IDX(SIOCSIWPMKSA)] = {
251 .header_type = IW_HEADER_TYPE_POINT, 252 .header_type = IW_HEADER_TYPE_POINT,
252 .token_size = 1, 253 .token_size = 1,
253 .min_tokens = sizeof(struct iw_pmksa), 254 .min_tokens = sizeof(struct iw_pmksa),
@@ -261,44 +262,44 @@ static const unsigned standard_ioctl_num = ARRAY_SIZE(standard_ioctl);
261 * we know about. 262 * we know about.
262 */ 263 */
263static const struct iw_ioctl_description standard_event[] = { 264static const struct iw_ioctl_description standard_event[] = {
264 [IWEVTXDROP - IWEVFIRST] = { 265 [IW_EVENT_IDX(IWEVTXDROP)] = {
265 .header_type = IW_HEADER_TYPE_ADDR, 266 .header_type = IW_HEADER_TYPE_ADDR,
266 }, 267 },
267 [IWEVQUAL - IWEVFIRST] = { 268 [IW_EVENT_IDX(IWEVQUAL)] = {
268 .header_type = IW_HEADER_TYPE_QUAL, 269 .header_type = IW_HEADER_TYPE_QUAL,
269 }, 270 },
270 [IWEVCUSTOM - IWEVFIRST] = { 271 [IW_EVENT_IDX(IWEVCUSTOM)] = {
271 .header_type = IW_HEADER_TYPE_POINT, 272 .header_type = IW_HEADER_TYPE_POINT,
272 .token_size = 1, 273 .token_size = 1,
273 .max_tokens = IW_CUSTOM_MAX, 274 .max_tokens = IW_CUSTOM_MAX,
274 }, 275 },
275 [IWEVREGISTERED - IWEVFIRST] = { 276 [IW_EVENT_IDX(IWEVREGISTERED)] = {
276 .header_type = IW_HEADER_TYPE_ADDR, 277 .header_type = IW_HEADER_TYPE_ADDR,
277 }, 278 },
278 [IWEVEXPIRED - IWEVFIRST] = { 279 [IW_EVENT_IDX(IWEVEXPIRED)] = {
279 .header_type = IW_HEADER_TYPE_ADDR, 280 .header_type = IW_HEADER_TYPE_ADDR,
280 }, 281 },
281 [IWEVGENIE - IWEVFIRST] = { 282 [IW_EVENT_IDX(IWEVGENIE)] = {
282 .header_type = IW_HEADER_TYPE_POINT, 283 .header_type = IW_HEADER_TYPE_POINT,
283 .token_size = 1, 284 .token_size = 1,
284 .max_tokens = IW_GENERIC_IE_MAX, 285 .max_tokens = IW_GENERIC_IE_MAX,
285 }, 286 },
286 [IWEVMICHAELMICFAILURE - IWEVFIRST] = { 287 [IW_EVENT_IDX(IWEVMICHAELMICFAILURE)] = {
287 .header_type = IW_HEADER_TYPE_POINT, 288 .header_type = IW_HEADER_TYPE_POINT,
288 .token_size = 1, 289 .token_size = 1,
289 .max_tokens = sizeof(struct iw_michaelmicfailure), 290 .max_tokens = sizeof(struct iw_michaelmicfailure),
290 }, 291 },
291 [IWEVASSOCREQIE - IWEVFIRST] = { 292 [IW_EVENT_IDX(IWEVASSOCREQIE)] = {
292 .header_type = IW_HEADER_TYPE_POINT, 293 .header_type = IW_HEADER_TYPE_POINT,
293 .token_size = 1, 294 .token_size = 1,
294 .max_tokens = IW_GENERIC_IE_MAX, 295 .max_tokens = IW_GENERIC_IE_MAX,
295 }, 296 },
296 [IWEVASSOCRESPIE - IWEVFIRST] = { 297 [IW_EVENT_IDX(IWEVASSOCRESPIE)] = {
297 .header_type = IW_HEADER_TYPE_POINT, 298 .header_type = IW_HEADER_TYPE_POINT,
298 .token_size = 1, 299 .token_size = 1,
299 .max_tokens = IW_GENERIC_IE_MAX, 300 .max_tokens = IW_GENERIC_IE_MAX,
300 }, 301 },
301 [IWEVPMKIDCAND - IWEVFIRST] = { 302 [IW_EVENT_IDX(IWEVPMKIDCAND)] = {
302 .header_type = IW_HEADER_TYPE_POINT, 303 .header_type = IW_HEADER_TYPE_POINT,
303 .token_size = 1, 304 .token_size = 1,
304 .max_tokens = sizeof(struct iw_pmkid_cand), 305 .max_tokens = sizeof(struct iw_pmkid_cand),
@@ -449,11 +450,11 @@ void wireless_send_event(struct net_device * dev,
449 450
450 /* Get the description of the Event */ 451 /* Get the description of the Event */
451 if (cmd <= SIOCIWLAST) { 452 if (cmd <= SIOCIWLAST) {
452 cmd_index = cmd - SIOCIWFIRST; 453 cmd_index = IW_IOCTL_IDX(cmd);
453 if (cmd_index < standard_ioctl_num) 454 if (cmd_index < standard_ioctl_num)
454 descr = &(standard_ioctl[cmd_index]); 455 descr = &(standard_ioctl[cmd_index]);
455 } else { 456 } else {
456 cmd_index = cmd - IWEVFIRST; 457 cmd_index = IW_EVENT_IDX(cmd);
457 if (cmd_index < standard_event_num) 458 if (cmd_index < standard_event_num)
458 descr = &(standard_event[cmd_index]); 459 descr = &(standard_event[cmd_index]);
459 } 460 }
@@ -662,7 +663,7 @@ static iw_handler get_handler(struct net_device *dev, unsigned int cmd)
662 return NULL; 663 return NULL;
663 664
664 /* Try as a standard command */ 665 /* Try as a standard command */
665 index = cmd - SIOCIWFIRST; 666 index = IW_IOCTL_IDX(cmd);
666 if (index < handlers->num_standard) 667 if (index < handlers->num_standard)
667 return handlers->standard[index]; 668 return handlers->standard[index];
668 669
@@ -954,9 +955,9 @@ static int ioctl_standard_call(struct net_device * dev,
954 int ret = -EINVAL; 955 int ret = -EINVAL;
955 956
956 /* Get the description of the IOCTL */ 957 /* Get the description of the IOCTL */
957 if ((cmd - SIOCIWFIRST) >= standard_ioctl_num) 958 if (IW_IOCTL_IDX(cmd) >= standard_ioctl_num)
958 return -EOPNOTSUPP; 959 return -EOPNOTSUPP;
959 descr = &(standard_ioctl[cmd - SIOCIWFIRST]); 960 descr = &(standard_ioctl[IW_IOCTL_IDX(cmd)]);
960 961
961 /* Check if we have a pointer to user space data or not */ 962 /* Check if we have a pointer to user space data or not */
962 if (descr->header_type != IW_HEADER_TYPE_POINT) { 963 if (descr->header_type != IW_HEADER_TYPE_POINT) {
@@ -1012,7 +1013,7 @@ static int compat_standard_call(struct net_device *dev,
1012 struct iw_point iwp; 1013 struct iw_point iwp;
1013 int err; 1014 int err;
1014 1015
1015 descr = standard_ioctl + (cmd - SIOCIWFIRST); 1016 descr = standard_ioctl + IW_IOCTL_IDX(cmd);
1016 1017
1017 if (descr->header_type != IW_HEADER_TYPE_POINT) 1018 if (descr->header_type != IW_HEADER_TYPE_POINT)
1018 return ioctl_standard_call(dev, iwr, cmd, info, handler); 1019 return ioctl_standard_call(dev, iwr, cmd, info, handler);
diff --git a/net/wireless/wext-priv.c b/net/wireless/wext-priv.c
index a3c2277de9e5..3feb28e41c53 100644
--- a/net/wireless/wext-priv.c
+++ b/net/wireless/wext-priv.c
@@ -7,6 +7,7 @@
7 * 7 *
8 * (As all part of the Linux kernel, this file is GPL) 8 * (As all part of the Linux kernel, this file is GPL)
9 */ 9 */
10#include <linux/slab.h>
10#include <linux/wireless.h> 11#include <linux/wireless.h>
11#include <linux/netdevice.h> 12#include <linux/netdevice.h>
12#include <net/iw_handler.h> 13#include <net/iw_handler.h>
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 5615a8802536..d5c6140f4cb8 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -7,6 +7,7 @@
7 7
8#include <linux/etherdevice.h> 8#include <linux/etherdevice.h>
9#include <linux/if_arp.h> 9#include <linux/if_arp.h>
10#include <linux/slab.h>
10#include <net/cfg80211.h> 11#include <net/cfg80211.h>
11#include "wext-compat.h" 12#include "wext-compat.h"
12#include "nl80211.h" 13#include "nl80211.h"
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 9796f3ed1edb..cbddd0cb83f1 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -47,6 +47,7 @@
47#include <linux/netdevice.h> 47#include <linux/netdevice.h>
48#include <linux/if_arp.h> 48#include <linux/if_arp.h>
49#include <linux/skbuff.h> 49#include <linux/skbuff.h>
50#include <linux/slab.h>
50#include <net/sock.h> 51#include <net/sock.h>
51#include <net/tcp_states.h> 52#include <net/tcp_states.h>
52#include <asm/uaccess.h> 53#include <asm/uaccess.h>
@@ -82,6 +83,41 @@ struct compat_x25_subscrip_struct {
82}; 83};
83#endif 84#endif
84 85
86
87int x25_parse_address_block(struct sk_buff *skb,
88 struct x25_address *called_addr,
89 struct x25_address *calling_addr)
90{
91 unsigned char len;
92 int needed;
93 int rc;
94
95 if (skb->len < 1) {
96 /* packet has no address block */
97 rc = 0;
98 goto empty;
99 }
100
101 len = *skb->data;
102 needed = 1 + (len >> 4) + (len & 0x0f);
103
104 if (skb->len < needed) {
105 /* packet is too short to hold the addresses it claims
106 to hold */
107 rc = -1;
108 goto empty;
109 }
110
111 return x25_addr_ntoa(skb->data, called_addr, calling_addr);
112
113empty:
114 *called_addr->x25_addr = 0;
115 *calling_addr->x25_addr = 0;
116
117 return rc;
118}
119
120
85int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr, 121int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr,
86 struct x25_address *calling_addr) 122 struct x25_address *calling_addr)
87{ 123{
@@ -553,7 +589,8 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
553 x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE; 589 x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE;
554 x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE; 590 x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE;
555 x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE; 591 x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE;
556 x25->facilities.throughput = X25_DEFAULT_THROUGHPUT; 592 x25->facilities.throughput = 0; /* by default don't negotiate
593 throughput */
557 x25->facilities.reverse = X25_DEFAULT_REVERSE; 594 x25->facilities.reverse = X25_DEFAULT_REVERSE;
558 x25->dte_facilities.calling_len = 0; 595 x25->dte_facilities.calling_len = 0;
559 x25->dte_facilities.called_len = 0; 596 x25->dte_facilities.called_len = 0;
@@ -921,16 +958,26 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
921 /* 958 /*
922 * Extract the X.25 addresses and convert them to ASCII strings, 959 * Extract the X.25 addresses and convert them to ASCII strings,
923 * and remove them. 960 * and remove them.
961 *
962 * Address block is mandatory in call request packets
924 */ 963 */
925 addr_len = x25_addr_ntoa(skb->data, &source_addr, &dest_addr); 964 addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr);
965 if (addr_len <= 0)
966 goto out_clear_request;
926 skb_pull(skb, addr_len); 967 skb_pull(skb, addr_len);
927 968
928 /* 969 /*
929 * Get the length of the facilities, skip past them for the moment 970 * Get the length of the facilities, skip past them for the moment
930 * get the call user data because this is needed to determine 971 * get the call user data because this is needed to determine
931 * the correct listener 972 * the correct listener
973 *
974 * Facilities length is mandatory in call request packets
932 */ 975 */
976 if (skb->len < 1)
977 goto out_clear_request;
933 len = skb->data[0] + 1; 978 len = skb->data[0] + 1;
979 if (skb->len < len)
980 goto out_clear_request;
934 skb_pull(skb,len); 981 skb_pull(skb,len);
935 982
936 /* 983 /*
@@ -1414,9 +1461,20 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1414 if (facilities.winsize_in < 1 || 1461 if (facilities.winsize_in < 1 ||
1415 facilities.winsize_in > 127) 1462 facilities.winsize_in > 127)
1416 break; 1463 break;
1417 if (facilities.throughput < 0x03 || 1464 if (facilities.throughput) {
1418 facilities.throughput > 0xDD) 1465 int out = facilities.throughput & 0xf0;
1419 break; 1466 int in = facilities.throughput & 0x0f;
1467 if (!out)
1468 facilities.throughput |=
1469 X25_DEFAULT_THROUGHPUT << 4;
1470 else if (out < 0x30 || out > 0xD0)
1471 break;
1472 if (!in)
1473 facilities.throughput |=
1474 X25_DEFAULT_THROUGHPUT;
1475 else if (in < 0x03 || in > 0x0D)
1476 break;
1477 }
1420 if (facilities.reverse && 1478 if (facilities.reverse &&
1421 (facilities.reverse & 0x81) != 0x81) 1479 (facilities.reverse & 0x81) != 0x81)
1422 break; 1480 break;
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index 52e304212241..b9ef682230a0 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -20,6 +20,7 @@
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/netdevice.h> 21#include <linux/netdevice.h>
22#include <linux/skbuff.h> 22#include <linux/skbuff.h>
23#include <linux/slab.h>
23#include <net/sock.h> 24#include <net/sock.h>
24#include <linux/if_arp.h> 25#include <linux/if_arp.h>
25#include <net/x25.h> 26#include <net/x25.h>
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index a21f6646eb3a..771bab00754b 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -35,7 +35,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
35 struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) 35 struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
36{ 36{
37 unsigned char *p = skb->data; 37 unsigned char *p = skb->data;
38 unsigned int len = *p++; 38 unsigned int len;
39 39
40 *vc_fac_mask = 0; 40 *vc_fac_mask = 0;
41 41
@@ -50,6 +50,14 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
50 memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae)); 50 memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae));
51 memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae)); 51 memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae));
52 52
53 if (skb->len < 1)
54 return 0;
55
56 len = *p++;
57
58 if (len >= skb->len)
59 return -1;
60
53 while (len > 0) { 61 while (len > 0) {
54 switch (*p & X25_FAC_CLASS_MASK) { 62 switch (*p & X25_FAC_CLASS_MASK) {
55 case X25_FAC_CLASS_A: 63 case X25_FAC_CLASS_A:
@@ -247,6 +255,8 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
247 memcpy(new, ours, sizeof(*new)); 255 memcpy(new, ours, sizeof(*new));
248 256
249 len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask); 257 len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
258 if (len < 0)
259 return len;
250 260
251 /* 261 /*
252 * They want reverse charging, we won't accept it. 262 * They want reverse charging, we won't accept it.
@@ -259,9 +269,18 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
259 new->reverse = theirs.reverse; 269 new->reverse = theirs.reverse;
260 270
261 if (theirs.throughput) { 271 if (theirs.throughput) {
262 if (theirs.throughput < ours->throughput) { 272 int theirs_in = theirs.throughput & 0x0f;
263 SOCK_DEBUG(sk, "X.25: throughput negotiated down\n"); 273 int theirs_out = theirs.throughput & 0xf0;
264 new->throughput = theirs.throughput; 274 int ours_in = ours->throughput & 0x0f;
275 int ours_out = ours->throughput & 0xf0;
276 if (!ours_in || theirs_in < ours_in) {
277 SOCK_DEBUG(sk, "X.25: inbound throughput negotiated\n");
278 new->throughput = (new->throughput & 0xf0) | theirs_in;
279 }
280 if (!ours_out || theirs_out < ours_out) {
281 SOCK_DEBUG(sk,
282 "X.25: outbound throughput negotiated\n");
283 new->throughput = (new->throughput & 0x0f) | theirs_out;
265 } 284 }
266 } 285 }
267 286
diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c
index 056a55f3a871..25a810793968 100644
--- a/net/x25/x25_forward.c
+++ b/net/x25/x25_forward.c
@@ -10,6 +10,7 @@
10 */ 10 */
11#include <linux/if_arp.h> 11#include <linux/if_arp.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/slab.h>
13#include <net/x25.h> 14#include <net/x25.h>
14 15
15LIST_HEAD(x25_forward_list); 16LIST_HEAD(x25_forward_list);
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 96d922783547..372ac226e648 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -23,6 +23,7 @@
23 * i-frames. 23 * i-frames.
24 */ 24 */
25 25
26#include <linux/slab.h>
26#include <linux/errno.h> 27#include <linux/errno.h>
27#include <linux/kernel.h> 28#include <linux/kernel.h>
28#include <linux/string.h> 29#include <linux/string.h>
@@ -89,6 +90,7 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
89static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) 90static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
90{ 91{
91 struct x25_address source_addr, dest_addr; 92 struct x25_address source_addr, dest_addr;
93 int len;
92 94
93 switch (frametype) { 95 switch (frametype) {
94 case X25_CALL_ACCEPTED: { 96 case X25_CALL_ACCEPTED: {
@@ -106,11 +108,17 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
106 * Parse the data in the frame. 108 * Parse the data in the frame.
107 */ 109 */
108 skb_pull(skb, X25_STD_MIN_LEN); 110 skb_pull(skb, X25_STD_MIN_LEN);
109 skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr)); 111
110 skb_pull(skb, 112 len = x25_parse_address_block(skb, &source_addr,
111 x25_parse_facilities(skb, &x25->facilities, 113 &dest_addr);
114 if (len > 0)
115 skb_pull(skb, len);
116
117 len = x25_parse_facilities(skb, &x25->facilities,
112 &x25->dte_facilities, 118 &x25->dte_facilities,
113 &x25->vc_facil_mask)); 119 &x25->vc_facil_mask);
120 if (len > 0)
121 skb_pull(skb, len);
114 /* 122 /*
115 * Copy any Call User Data. 123 * Copy any Call User Data.
116 */ 124 */
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index e4e1b6e49538..73e7b954ad28 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -24,6 +24,7 @@
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/jiffies.h> 25#include <linux/jiffies.h>
26#include <linux/timer.h> 26#include <linux/timer.h>
27#include <linux/slab.h>
27#include <linux/netdevice.h> 28#include <linux/netdevice.h>
28#include <linux/skbuff.h> 29#include <linux/skbuff.h>
29#include <asm/uaccess.h> 30#include <asm/uaccess.h>
diff --git a/net/x25/x25_out.c b/net/x25/x25_out.c
index 2b96b52114d6..52351a26b6fc 100644
--- a/net/x25/x25_out.c
+++ b/net/x25/x25_out.c
@@ -22,6 +22,7 @@
22 * needed cleaned seq-number fields. 22 * needed cleaned seq-number fields.
23 */ 23 */
24 24
25#include <linux/slab.h>
25#include <linux/socket.h> 26#include <linux/socket.h>
26#include <linux/kernel.h> 27#include <linux/kernel.h>
27#include <linux/string.h> 28#include <linux/string.h>
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c
index b95fae9ab393..97d77c532d8c 100644
--- a/net/x25/x25_route.c
+++ b/net/x25/x25_route.c
@@ -19,6 +19,7 @@
19 19
20#include <linux/if_arp.h> 20#include <linux/if_arp.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/slab.h>
22#include <net/x25.h> 23#include <net/x25.h>
23 24
24LIST_HEAD(x25_route_list); 25LIST_HEAD(x25_route_list);
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 352b32d216fc..dc20cf12f39b 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -23,6 +23,7 @@
23 * restriction on response. 23 * restriction on response.
24 */ 24 */
25 25
26#include <linux/slab.h>
26#include <linux/kernel.h> 27#include <linux/kernel.h>
27#include <linux/string.h> 28#include <linux/string.h>
28#include <linux/skbuff.h> 29#include <linux/skbuff.h>
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index 0fc5ff66d1fa..fc91ad7ee26e 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -17,11 +17,11 @@
17 17
18#include <linux/crypto.h> 18#include <linux/crypto.h>
19#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/gfp.h>
21#include <linux/list.h> 20#include <linux/list.h>
22#include <linux/module.h> 21#include <linux/module.h>
23#include <linux/mutex.h> 22#include <linux/mutex.h>
24#include <linux/percpu.h> 23#include <linux/percpu.h>
24#include <linux/slab.h>
25#include <linux/smp.h> 25#include <linux/smp.h>
26#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
27#include <net/ip.h> 27#include <net/ip.h>
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index b9fe13138c07..6a329158bdfa 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -14,6 +14,7 @@
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/netfilter.h> 15#include <linux/netfilter.h>
16#include <linux/skbuff.h> 16#include <linux/skbuff.h>
17#include <linux/slab.h>
17#include <linux/spinlock.h> 18#include <linux/spinlock.h>
18#include <net/dst.h> 19#include <net/dst.h>
19#include <net/xfrm.h> 20#include <net/xfrm.h>
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 843e066649cb..7430ac26ec49 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -37,6 +37,8 @@
37DEFINE_MUTEX(xfrm_cfg_mutex); 37DEFINE_MUTEX(xfrm_cfg_mutex);
38EXPORT_SYMBOL(xfrm_cfg_mutex); 38EXPORT_SYMBOL(xfrm_cfg_mutex);
39 39
40static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
41static struct dst_entry *xfrm_policy_sk_bundles;
40static DEFINE_RWLOCK(xfrm_policy_lock); 42static DEFINE_RWLOCK(xfrm_policy_lock);
41 43
42static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); 44static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
@@ -44,12 +46,10 @@ static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
44 46
45static struct kmem_cache *xfrm_dst_cache __read_mostly; 47static struct kmem_cache *xfrm_dst_cache __read_mostly;
46 48
47static HLIST_HEAD(xfrm_policy_gc_list);
48static DEFINE_SPINLOCK(xfrm_policy_gc_lock);
49
50static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family); 49static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
51static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo); 50static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
52static void xfrm_init_pmtu(struct dst_entry *dst); 51static void xfrm_init_pmtu(struct dst_entry *dst);
52static int stale_bundle(struct dst_entry *dst);
53 53
54static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, 54static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
55 int dir); 55 int dir);
@@ -156,7 +156,7 @@ static void xfrm_policy_timer(unsigned long data)
156 156
157 read_lock(&xp->lock); 157 read_lock(&xp->lock);
158 158
159 if (xp->walk.dead) 159 if (unlikely(xp->walk.dead))
160 goto out; 160 goto out;
161 161
162 dir = xfrm_policy_id2dir(xp->index); 162 dir = xfrm_policy_id2dir(xp->index);
@@ -216,6 +216,35 @@ expired:
216 xfrm_pol_put(xp); 216 xfrm_pol_put(xp);
217} 217}
218 218
219static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
220{
221 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
222
223 if (unlikely(pol->walk.dead))
224 flo = NULL;
225 else
226 xfrm_pol_hold(pol);
227
228 return flo;
229}
230
231static int xfrm_policy_flo_check(struct flow_cache_object *flo)
232{
233 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
234
235 return !pol->walk.dead;
236}
237
238static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
239{
240 xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
241}
242
243static const struct flow_cache_ops xfrm_policy_fc_ops = {
244 .get = xfrm_policy_flo_get,
245 .check = xfrm_policy_flo_check,
246 .delete = xfrm_policy_flo_delete,
247};
219 248
220/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2 249/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
221 * SPD calls. 250 * SPD calls.
@@ -236,6 +265,7 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
236 atomic_set(&policy->refcnt, 1); 265 atomic_set(&policy->refcnt, 1);
237 setup_timer(&policy->timer, xfrm_policy_timer, 266 setup_timer(&policy->timer, xfrm_policy_timer,
238 (unsigned long)policy); 267 (unsigned long)policy);
268 policy->flo.ops = &xfrm_policy_fc_ops;
239 } 269 }
240 return policy; 270 return policy;
241} 271}
@@ -247,8 +277,6 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
247{ 277{
248 BUG_ON(!policy->walk.dead); 278 BUG_ON(!policy->walk.dead);
249 279
250 BUG_ON(policy->bundles);
251
252 if (del_timer(&policy->timer)) 280 if (del_timer(&policy->timer))
253 BUG(); 281 BUG();
254 282
@@ -257,63 +285,20 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
257} 285}
258EXPORT_SYMBOL(xfrm_policy_destroy); 286EXPORT_SYMBOL(xfrm_policy_destroy);
259 287
260static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
261{
262 struct dst_entry *dst;
263
264 while ((dst = policy->bundles) != NULL) {
265 policy->bundles = dst->next;
266 dst_free(dst);
267 }
268
269 if (del_timer(&policy->timer))
270 atomic_dec(&policy->refcnt);
271
272 if (atomic_read(&policy->refcnt) > 1)
273 flow_cache_flush();
274
275 xfrm_pol_put(policy);
276}
277
278static void xfrm_policy_gc_task(struct work_struct *work)
279{
280 struct xfrm_policy *policy;
281 struct hlist_node *entry, *tmp;
282 struct hlist_head gc_list;
283
284 spin_lock_bh(&xfrm_policy_gc_lock);
285 gc_list.first = xfrm_policy_gc_list.first;
286 INIT_HLIST_HEAD(&xfrm_policy_gc_list);
287 spin_unlock_bh(&xfrm_policy_gc_lock);
288
289 hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
290 xfrm_policy_gc_kill(policy);
291}
292static DECLARE_WORK(xfrm_policy_gc_work, xfrm_policy_gc_task);
293
294/* Rule must be locked. Release descentant resources, announce 288/* Rule must be locked. Release descentant resources, announce
295 * entry dead. The rule must be unlinked from lists to the moment. 289 * entry dead. The rule must be unlinked from lists to the moment.
296 */ 290 */
297 291
298static void xfrm_policy_kill(struct xfrm_policy *policy) 292static void xfrm_policy_kill(struct xfrm_policy *policy)
299{ 293{
300 int dead;
301
302 write_lock_bh(&policy->lock);
303 dead = policy->walk.dead;
304 policy->walk.dead = 1; 294 policy->walk.dead = 1;
305 write_unlock_bh(&policy->lock);
306 295
307 if (unlikely(dead)) { 296 atomic_inc(&policy->genid);
308 WARN_ON(1);
309 return;
310 }
311 297
312 spin_lock_bh(&xfrm_policy_gc_lock); 298 if (del_timer(&policy->timer))
313 hlist_add_head(&policy->bydst, &xfrm_policy_gc_list); 299 xfrm_pol_put(policy);
314 spin_unlock_bh(&xfrm_policy_gc_lock);
315 300
316 schedule_work(&xfrm_policy_gc_work); 301 xfrm_pol_put(policy);
317} 302}
318 303
319static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024; 304static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
@@ -555,7 +540,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
555 struct xfrm_policy *delpol; 540 struct xfrm_policy *delpol;
556 struct hlist_head *chain; 541 struct hlist_head *chain;
557 struct hlist_node *entry, *newpos; 542 struct hlist_node *entry, *newpos;
558 struct dst_entry *gc_list;
559 u32 mark = policy->mark.v & policy->mark.m; 543 u32 mark = policy->mark.v & policy->mark.m;
560 544
561 write_lock_bh(&xfrm_policy_lock); 545 write_lock_bh(&xfrm_policy_lock);
@@ -605,34 +589,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
605 else if (xfrm_bydst_should_resize(net, dir, NULL)) 589 else if (xfrm_bydst_should_resize(net, dir, NULL))
606 schedule_work(&net->xfrm.policy_hash_work); 590 schedule_work(&net->xfrm.policy_hash_work);
607 591
608 read_lock_bh(&xfrm_policy_lock);
609 gc_list = NULL;
610 entry = &policy->bydst;
611 hlist_for_each_entry_continue(policy, entry, bydst) {
612 struct dst_entry *dst;
613
614 write_lock(&policy->lock);
615 dst = policy->bundles;
616 if (dst) {
617 struct dst_entry *tail = dst;
618 while (tail->next)
619 tail = tail->next;
620 tail->next = gc_list;
621 gc_list = dst;
622
623 policy->bundles = NULL;
624 }
625 write_unlock(&policy->lock);
626 }
627 read_unlock_bh(&xfrm_policy_lock);
628
629 while (gc_list) {
630 struct dst_entry *dst = gc_list;
631
632 gc_list = dst->next;
633 dst_free(dst);
634 }
635
636 return 0; 592 return 0;
637} 593}
638EXPORT_SYMBOL(xfrm_policy_insert); 594EXPORT_SYMBOL(xfrm_policy_insert);
@@ -671,10 +627,8 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
671 } 627 }
672 write_unlock_bh(&xfrm_policy_lock); 628 write_unlock_bh(&xfrm_policy_lock);
673 629
674 if (ret && delete) { 630 if (ret && delete)
675 atomic_inc(&flow_cache_genid);
676 xfrm_policy_kill(ret); 631 xfrm_policy_kill(ret);
677 }
678 return ret; 632 return ret;
679} 633}
680EXPORT_SYMBOL(xfrm_policy_bysel_ctx); 634EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
@@ -713,10 +667,8 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
713 } 667 }
714 write_unlock_bh(&xfrm_policy_lock); 668 write_unlock_bh(&xfrm_policy_lock);
715 669
716 if (ret && delete) { 670 if (ret && delete)
717 atomic_inc(&flow_cache_genid);
718 xfrm_policy_kill(ret); 671 xfrm_policy_kill(ret);
719 }
720 return ret; 672 return ret;
721} 673}
722EXPORT_SYMBOL(xfrm_policy_byid); 674EXPORT_SYMBOL(xfrm_policy_byid);
@@ -776,7 +728,6 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
776int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) 728int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
777{ 729{
778 int dir, err = 0, cnt = 0; 730 int dir, err = 0, cnt = 0;
779 struct xfrm_policy *dp;
780 731
781 write_lock_bh(&xfrm_policy_lock); 732 write_lock_bh(&xfrm_policy_lock);
782 733
@@ -794,10 +745,9 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
794 &net->xfrm.policy_inexact[dir], bydst) { 745 &net->xfrm.policy_inexact[dir], bydst) {
795 if (pol->type != type) 746 if (pol->type != type)
796 continue; 747 continue;
797 dp = __xfrm_policy_unlink(pol, dir); 748 __xfrm_policy_unlink(pol, dir);
798 write_unlock_bh(&xfrm_policy_lock); 749 write_unlock_bh(&xfrm_policy_lock);
799 if (dp) 750 cnt++;
800 cnt++;
801 751
802 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, 752 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
803 audit_info->sessionid, 753 audit_info->sessionid,
@@ -816,10 +766,9 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
816 bydst) { 766 bydst) {
817 if (pol->type != type) 767 if (pol->type != type)
818 continue; 768 continue;
819 dp = __xfrm_policy_unlink(pol, dir); 769 __xfrm_policy_unlink(pol, dir);
820 write_unlock_bh(&xfrm_policy_lock); 770 write_unlock_bh(&xfrm_policy_lock);
821 if (dp) 771 cnt++;
822 cnt++;
823 772
824 xfrm_audit_policy_delete(pol, 1, 773 xfrm_audit_policy_delete(pol, 1,
825 audit_info->loginuid, 774 audit_info->loginuid,
@@ -835,7 +784,6 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
835 } 784 }
836 if (!cnt) 785 if (!cnt)
837 err = -ESRCH; 786 err = -ESRCH;
838 atomic_inc(&flow_cache_genid);
839out: 787out:
840 write_unlock_bh(&xfrm_policy_lock); 788 write_unlock_bh(&xfrm_policy_lock);
841 return err; 789 return err;
@@ -989,32 +937,37 @@ fail:
989 return ret; 937 return ret;
990} 938}
991 939
992static int xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, 940static struct xfrm_policy *
993 u8 dir, void **objp, atomic_t **obj_refp) 941__xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir)
994{ 942{
943#ifdef CONFIG_XFRM_SUB_POLICY
995 struct xfrm_policy *pol; 944 struct xfrm_policy *pol;
996 int err = 0;
997 945
998#ifdef CONFIG_XFRM_SUB_POLICY
999 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir); 946 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
1000 if (IS_ERR(pol)) { 947 if (pol != NULL)
1001 err = PTR_ERR(pol); 948 return pol;
1002 pol = NULL;
1003 }
1004 if (pol || err)
1005 goto end;
1006#endif
1007 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
1008 if (IS_ERR(pol)) {
1009 err = PTR_ERR(pol);
1010 pol = NULL;
1011 }
1012#ifdef CONFIG_XFRM_SUB_POLICY
1013end:
1014#endif 949#endif
1015 if ((*objp = (void *) pol) != NULL) 950 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
1016 *obj_refp = &pol->refcnt; 951}
1017 return err; 952
953static struct flow_cache_object *
954xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family,
955 u8 dir, struct flow_cache_object *old_obj, void *ctx)
956{
957 struct xfrm_policy *pol;
958
959 if (old_obj)
960 xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
961
962 pol = __xfrm_policy_lookup(net, fl, family, dir);
963 if (IS_ERR_OR_NULL(pol))
964 return ERR_CAST(pol);
965
966 /* Resolver returns two references:
967 * one for cache and one for caller of flow_cache_lookup() */
968 xfrm_pol_hold(pol);
969
970 return &pol->flo;
1018} 971}
1019 972
1020static inline int policy_to_flow_dir(int dir) 973static inline int policy_to_flow_dir(int dir)
@@ -1104,8 +1057,6 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1104 pol = __xfrm_policy_unlink(pol, dir); 1057 pol = __xfrm_policy_unlink(pol, dir);
1105 write_unlock_bh(&xfrm_policy_lock); 1058 write_unlock_bh(&xfrm_policy_lock);
1106 if (pol) { 1059 if (pol) {
1107 if (dir < XFRM_POLICY_MAX)
1108 atomic_inc(&flow_cache_genid);
1109 xfrm_policy_kill(pol); 1060 xfrm_policy_kill(pol);
1110 return 0; 1061 return 0;
1111 } 1062 }
@@ -1132,6 +1083,9 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1132 __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir); 1083 __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
1133 } 1084 }
1134 if (old_pol) 1085 if (old_pol)
1086 /* Unlinking succeeds always. This is the only function
1087 * allowed to delete or replace socket policy.
1088 */
1135 __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir); 1089 __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
1136 write_unlock_bh(&xfrm_policy_lock); 1090 write_unlock_bh(&xfrm_policy_lock);
1137 1091
@@ -1300,18 +1254,6 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
1300 * still valid. 1254 * still valid.
1301 */ 1255 */
1302 1256
1303static struct dst_entry *
1304xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
1305{
1306 struct dst_entry *x;
1307 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1308 if (unlikely(afinfo == NULL))
1309 return ERR_PTR(-EINVAL);
1310 x = afinfo->find_bundle(fl, policy);
1311 xfrm_policy_put_afinfo(afinfo);
1312 return x;
1313}
1314
1315static inline int xfrm_get_tos(struct flowi *fl, int family) 1257static inline int xfrm_get_tos(struct flowi *fl, int family)
1316{ 1258{
1317 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1259 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
@@ -1327,6 +1269,54 @@ static inline int xfrm_get_tos(struct flowi *fl, int family)
1327 return tos; 1269 return tos;
1328} 1270}
1329 1271
1272static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
1273{
1274 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1275 struct dst_entry *dst = &xdst->u.dst;
1276
1277 if (xdst->route == NULL) {
1278 /* Dummy bundle - if it has xfrms we were not
1279 * able to build bundle as template resolution failed.
1280 * It means we need to try again resolving. */
1281 if (xdst->num_xfrms > 0)
1282 return NULL;
1283 } else {
1284 /* Real bundle */
1285 if (stale_bundle(dst))
1286 return NULL;
1287 }
1288
1289 dst_hold(dst);
1290 return flo;
1291}
1292
1293static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
1294{
1295 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1296 struct dst_entry *dst = &xdst->u.dst;
1297
1298 if (!xdst->route)
1299 return 0;
1300 if (stale_bundle(dst))
1301 return 0;
1302
1303 return 1;
1304}
1305
1306static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
1307{
1308 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1309 struct dst_entry *dst = &xdst->u.dst;
1310
1311 dst_free(dst);
1312}
1313
1314static const struct flow_cache_ops xfrm_bundle_fc_ops = {
1315 .get = xfrm_bundle_flo_get,
1316 .check = xfrm_bundle_flo_check,
1317 .delete = xfrm_bundle_flo_delete,
1318};
1319
1330static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) 1320static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1331{ 1321{
1332 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1322 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
@@ -1349,9 +1339,10 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1349 BUG(); 1339 BUG();
1350 } 1340 }
1351 xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS); 1341 xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);
1352
1353 xfrm_policy_put_afinfo(afinfo); 1342 xfrm_policy_put_afinfo(afinfo);
1354 1343
1344 xdst->flo.ops = &xfrm_bundle_fc_ops;
1345
1355 return xdst; 1346 return xdst;
1356} 1347}
1357 1348
@@ -1389,6 +1380,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1389 return err; 1380 return err;
1390} 1381}
1391 1382
1383
1392/* Allocate chain of dst_entry's, attach known xfrm's, calculate 1384/* Allocate chain of dst_entry's, attach known xfrm's, calculate
1393 * all the metrics... Shortly, bundle a bundle. 1385 * all the metrics... Shortly, bundle a bundle.
1394 */ 1386 */
@@ -1452,7 +1444,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1452 dst_hold(dst); 1444 dst_hold(dst);
1453 1445
1454 dst1->xfrm = xfrm[i]; 1446 dst1->xfrm = xfrm[i];
1455 xdst->genid = xfrm[i]->genid; 1447 xdst->xfrm_genid = xfrm[i]->genid;
1456 1448
1457 dst1->obsolete = -1; 1449 dst1->obsolete = -1;
1458 dst1->flags |= DST_HOST; 1450 dst1->flags |= DST_HOST;
@@ -1545,7 +1537,186 @@ xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
1545#endif 1537#endif
1546} 1538}
1547 1539
1548static int stale_bundle(struct dst_entry *dst); 1540static int xfrm_expand_policies(struct flowi *fl, u16 family,
1541 struct xfrm_policy **pols,
1542 int *num_pols, int *num_xfrms)
1543{
1544 int i;
1545
1546 if (*num_pols == 0 || !pols[0]) {
1547 *num_pols = 0;
1548 *num_xfrms = 0;
1549 return 0;
1550 }
1551 if (IS_ERR(pols[0]))
1552 return PTR_ERR(pols[0]);
1553
1554 *num_xfrms = pols[0]->xfrm_nr;
1555
1556#ifdef CONFIG_XFRM_SUB_POLICY
1557 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
1558 pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1559 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
1560 XFRM_POLICY_TYPE_MAIN,
1561 fl, family,
1562 XFRM_POLICY_OUT);
1563 if (pols[1]) {
1564 if (IS_ERR(pols[1])) {
1565 xfrm_pols_put(pols, *num_pols);
1566 return PTR_ERR(pols[1]);
1567 }
1568 (*num_pols) ++;
1569 (*num_xfrms) += pols[1]->xfrm_nr;
1570 }
1571 }
1572#endif
1573 for (i = 0; i < *num_pols; i++) {
1574 if (pols[i]->action != XFRM_POLICY_ALLOW) {
1575 *num_xfrms = -1;
1576 break;
1577 }
1578 }
1579
1580 return 0;
1581
1582}
1583
1584static struct xfrm_dst *
1585xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1586 struct flowi *fl, u16 family,
1587 struct dst_entry *dst_orig)
1588{
1589 struct net *net = xp_net(pols[0]);
1590 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1591 struct dst_entry *dst;
1592 struct xfrm_dst *xdst;
1593 int err;
1594
1595 /* Try to instantiate a bundle */
1596 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1597 if (err < 0) {
1598 if (err != -EAGAIN)
1599 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1600 return ERR_PTR(err);
1601 }
1602
1603 dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
1604 if (IS_ERR(dst)) {
1605 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1606 return ERR_CAST(dst);
1607 }
1608
1609 xdst = (struct xfrm_dst *)dst;
1610 xdst->num_xfrms = err;
1611 if (num_pols > 1)
1612 err = xfrm_dst_update_parent(dst, &pols[1]->selector);
1613 else
1614 err = xfrm_dst_update_origin(dst, fl);
1615 if (unlikely(err)) {
1616 dst_free(dst);
1617 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1618 return ERR_PTR(err);
1619 }
1620
1621 xdst->num_pols = num_pols;
1622 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
1623 xdst->policy_genid = atomic_read(&pols[0]->genid);
1624
1625 return xdst;
1626}
1627
1628static struct flow_cache_object *
1629xfrm_bundle_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir,
1630 struct flow_cache_object *oldflo, void *ctx)
1631{
1632 struct dst_entry *dst_orig = (struct dst_entry *)ctx;
1633 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1634 struct xfrm_dst *xdst, *new_xdst;
1635 int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
1636
1637 /* Check if the policies from old bundle are usable */
1638 xdst = NULL;
1639 if (oldflo) {
1640 xdst = container_of(oldflo, struct xfrm_dst, flo);
1641 num_pols = xdst->num_pols;
1642 num_xfrms = xdst->num_xfrms;
1643 pol_dead = 0;
1644 for (i = 0; i < num_pols; i++) {
1645 pols[i] = xdst->pols[i];
1646 pol_dead |= pols[i]->walk.dead;
1647 }
1648 if (pol_dead) {
1649 dst_free(&xdst->u.dst);
1650 xdst = NULL;
1651 num_pols = 0;
1652 num_xfrms = 0;
1653 oldflo = NULL;
1654 }
1655 }
1656
1657 /* Resolve policies to use if we couldn't get them from
1658 * previous cache entry */
1659 if (xdst == NULL) {
1660 num_pols = 1;
1661 pols[0] = __xfrm_policy_lookup(net, fl, family, dir);
1662 err = xfrm_expand_policies(fl, family, pols,
1663 &num_pols, &num_xfrms);
1664 if (err < 0)
1665 goto inc_error;
1666 if (num_pols == 0)
1667 return NULL;
1668 if (num_xfrms <= 0)
1669 goto make_dummy_bundle;
1670 }
1671
1672 new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
1673 if (IS_ERR(new_xdst)) {
1674 err = PTR_ERR(new_xdst);
1675 if (err != -EAGAIN)
1676 goto error;
1677 if (oldflo == NULL)
1678 goto make_dummy_bundle;
1679 dst_hold(&xdst->u.dst);
1680 return oldflo;
1681 }
1682
1683 /* Kill the previous bundle */
1684 if (xdst) {
1685 /* The policies were stolen for newly generated bundle */
1686 xdst->num_pols = 0;
1687 dst_free(&xdst->u.dst);
1688 }
1689
1690 /* Flow cache does not have reference, it dst_free()'s,
1691 * but we do need to return one reference for original caller */
1692 dst_hold(&new_xdst->u.dst);
1693 return &new_xdst->flo;
1694
1695make_dummy_bundle:
1696 /* We found policies, but there's no bundles to instantiate:
1697 * either because the policy blocks, has no transformations or
1698 * we could not build template (no xfrm_states).*/
1699 xdst = xfrm_alloc_dst(net, family);
1700 if (IS_ERR(xdst)) {
1701 xfrm_pols_put(pols, num_pols);
1702 return ERR_CAST(xdst);
1703 }
1704 xdst->num_pols = num_pols;
1705 xdst->num_xfrms = num_xfrms;
1706 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
1707
1708 dst_hold(&xdst->u.dst);
1709 return &xdst->flo;
1710
1711inc_error:
1712 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1713error:
1714 if (xdst != NULL)
1715 dst_free(&xdst->u.dst);
1716 else
1717 xfrm_pols_put(pols, num_pols);
1718 return ERR_PTR(err);
1719}
1549 1720
1550/* Main function: finds/creates a bundle for given flow. 1721/* Main function: finds/creates a bundle for given flow.
1551 * 1722 *
@@ -1555,245 +1726,152 @@ static int stale_bundle(struct dst_entry *dst);
1555int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl, 1726int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
1556 struct sock *sk, int flags) 1727 struct sock *sk, int flags)
1557{ 1728{
1558 struct xfrm_policy *policy;
1559 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 1729 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1560 int npols; 1730 struct flow_cache_object *flo;
1561 int pol_dead; 1731 struct xfrm_dst *xdst;
1562 int xfrm_nr; 1732 struct dst_entry *dst, *dst_orig = *dst_p, *route;
1563 int pi; 1733 u16 family = dst_orig->ops->family;
1564 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1565 struct dst_entry *dst, *dst_orig = *dst_p;
1566 int nx = 0;
1567 int err;
1568 u32 genid;
1569 u16 family;
1570 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); 1734 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
1735 int i, err, num_pols, num_xfrms, drop_pols = 0;
1571 1736
1572restart: 1737restart:
1573 genid = atomic_read(&flow_cache_genid); 1738 dst = NULL;
1574 policy = NULL; 1739 xdst = NULL;
1575 for (pi = 0; pi < ARRAY_SIZE(pols); pi++) 1740 route = NULL;
1576 pols[pi] = NULL;
1577 npols = 0;
1578 pol_dead = 0;
1579 xfrm_nr = 0;
1580 1741
1581 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { 1742 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
1582 policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); 1743 num_pols = 1;
1583 err = PTR_ERR(policy); 1744 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
1584 if (IS_ERR(policy)) { 1745 err = xfrm_expand_policies(fl, family, pols,
1585 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 1746 &num_pols, &num_xfrms);
1747 if (err < 0)
1586 goto dropdst; 1748 goto dropdst;
1749
1750 if (num_pols) {
1751 if (num_xfrms <= 0) {
1752 drop_pols = num_pols;
1753 goto no_transform;
1754 }
1755
1756 xdst = xfrm_resolve_and_create_bundle(
1757 pols, num_pols, fl,
1758 family, dst_orig);
1759 if (IS_ERR(xdst)) {
1760 xfrm_pols_put(pols, num_pols);
1761 err = PTR_ERR(xdst);
1762 goto dropdst;
1763 }
1764
1765 spin_lock_bh(&xfrm_policy_sk_bundle_lock);
1766 xdst->u.dst.next = xfrm_policy_sk_bundles;
1767 xfrm_policy_sk_bundles = &xdst->u.dst;
1768 spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
1769
1770 route = xdst->route;
1587 } 1771 }
1588 } 1772 }
1589 1773
1590 if (!policy) { 1774 if (xdst == NULL) {
1591 /* To accelerate a bit... */ 1775 /* To accelerate a bit... */
1592 if ((dst_orig->flags & DST_NOXFRM) || 1776 if ((dst_orig->flags & DST_NOXFRM) ||
1593 !net->xfrm.policy_count[XFRM_POLICY_OUT]) 1777 !net->xfrm.policy_count[XFRM_POLICY_OUT])
1594 goto nopol; 1778 goto nopol;
1595 1779
1596 policy = flow_cache_lookup(net, fl, dst_orig->ops->family, 1780 flo = flow_cache_lookup(net, fl, family, dir,
1597 dir, xfrm_policy_lookup); 1781 xfrm_bundle_lookup, dst_orig);
1598 err = PTR_ERR(policy); 1782 if (flo == NULL)
1599 if (IS_ERR(policy)) { 1783 goto nopol;
1600 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 1784 if (IS_ERR(flo)) {
1785 err = PTR_ERR(flo);
1601 goto dropdst; 1786 goto dropdst;
1602 } 1787 }
1788 xdst = container_of(flo, struct xfrm_dst, flo);
1789
1790 num_pols = xdst->num_pols;
1791 num_xfrms = xdst->num_xfrms;
1792 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy*) * num_pols);
1793 route = xdst->route;
1794 }
1795
1796 dst = &xdst->u.dst;
1797 if (route == NULL && num_xfrms > 0) {
1798 /* The only case when xfrm_bundle_lookup() returns a
1799 * bundle with null route, is when the template could
1800 * not be resolved. It means policies are there, but
1801 * bundle could not be created, since we don't yet
1802 * have the xfrm_state's. We need to wait for KM to
1803 * negotiate new SA's or bail out with error.*/
1804 if (net->xfrm.sysctl_larval_drop) {
1805 /* EREMOTE tells the caller to generate
1806 * a one-shot blackhole route. */
1807 dst_release(dst);
1808 xfrm_pols_put(pols, num_pols);
1809 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1810 return -EREMOTE;
1811 }
1812 if (flags & XFRM_LOOKUP_WAIT) {
1813 DECLARE_WAITQUEUE(wait, current);
1814
1815 add_wait_queue(&net->xfrm.km_waitq, &wait);
1816 set_current_state(TASK_INTERRUPTIBLE);
1817 schedule();
1818 set_current_state(TASK_RUNNING);
1819 remove_wait_queue(&net->xfrm.km_waitq, &wait);
1820
1821 if (!signal_pending(current)) {
1822 dst_release(dst);
1823 goto restart;
1824 }
1825
1826 err = -ERESTART;
1827 } else
1828 err = -EAGAIN;
1829
1830 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1831 goto error;
1603 } 1832 }
1604 1833
1605 if (!policy) 1834no_transform:
1835 if (num_pols == 0)
1606 goto nopol; 1836 goto nopol;
1607 1837
1608 family = dst_orig->ops->family; 1838 if ((flags & XFRM_LOOKUP_ICMP) &&
1609 pols[0] = policy; 1839 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
1610 npols ++; 1840 err = -ENOENT;
1611 xfrm_nr += pols[0]->xfrm_nr;
1612
1613 err = -ENOENT;
1614 if ((flags & XFRM_LOOKUP_ICMP) && !(policy->flags & XFRM_POLICY_ICMP))
1615 goto error; 1841 goto error;
1842 }
1616 1843
1617 policy->curlft.use_time = get_seconds(); 1844 for (i = 0; i < num_pols; i++)
1845 pols[i]->curlft.use_time = get_seconds();
1618 1846
1619 switch (policy->action) { 1847 if (num_xfrms < 0) {
1620 default:
1621 case XFRM_POLICY_BLOCK:
1622 /* Prohibit the flow */ 1848 /* Prohibit the flow */
1623 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK); 1849 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
1624 err = -EPERM; 1850 err = -EPERM;
1625 goto error; 1851 goto error;
1626 1852 } else if (num_xfrms > 0) {
1627 case XFRM_POLICY_ALLOW: 1853 /* Flow transformed */
1628#ifndef CONFIG_XFRM_SUB_POLICY 1854 *dst_p = dst;
1629 if (policy->xfrm_nr == 0) { 1855 dst_release(dst_orig);
1630 /* Flow passes not transformed. */ 1856 } else {
1631 xfrm_pol_put(policy); 1857 /* Flow passes untransformed */
1632 return 0; 1858 dst_release(dst);
1633 }
1634#endif
1635
1636 /* Try to find matching bundle.
1637 *
1638 * LATER: help from flow cache. It is optional, this
1639 * is required only for output policy.
1640 */
1641 dst = xfrm_find_bundle(fl, policy, family);
1642 if (IS_ERR(dst)) {
1643 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1644 err = PTR_ERR(dst);
1645 goto error;
1646 }
1647
1648 if (dst)
1649 break;
1650
1651#ifdef CONFIG_XFRM_SUB_POLICY
1652 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1653 pols[1] = xfrm_policy_lookup_bytype(net,
1654 XFRM_POLICY_TYPE_MAIN,
1655 fl, family,
1656 XFRM_POLICY_OUT);
1657 if (pols[1]) {
1658 if (IS_ERR(pols[1])) {
1659 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1660 err = PTR_ERR(pols[1]);
1661 goto error;
1662 }
1663 if (pols[1]->action == XFRM_POLICY_BLOCK) {
1664 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
1665 err = -EPERM;
1666 goto error;
1667 }
1668 npols ++;
1669 xfrm_nr += pols[1]->xfrm_nr;
1670 }
1671 }
1672
1673 /*
1674 * Because neither flowi nor bundle information knows about
1675 * transformation template size. On more than one policy usage
1676 * we can realize whether all of them is bypass or not after
1677 * they are searched. See above not-transformed bypass
1678 * is surrounded by non-sub policy configuration, too.
1679 */
1680 if (xfrm_nr == 0) {
1681 /* Flow passes not transformed. */
1682 xfrm_pols_put(pols, npols);
1683 return 0;
1684 }
1685
1686#endif
1687 nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);
1688
1689 if (unlikely(nx<0)) {
1690 err = nx;
1691 if (err == -EAGAIN && net->xfrm.sysctl_larval_drop) {
1692 /* EREMOTE tells the caller to generate
1693 * a one-shot blackhole route.
1694 */
1695 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1696 xfrm_pol_put(policy);
1697 return -EREMOTE;
1698 }
1699 if (err == -EAGAIN && (flags & XFRM_LOOKUP_WAIT)) {
1700 DECLARE_WAITQUEUE(wait, current);
1701
1702 add_wait_queue(&net->xfrm.km_waitq, &wait);
1703 set_current_state(TASK_INTERRUPTIBLE);
1704 schedule();
1705 set_current_state(TASK_RUNNING);
1706 remove_wait_queue(&net->xfrm.km_waitq, &wait);
1707
1708 nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);
1709
1710 if (nx == -EAGAIN && signal_pending(current)) {
1711 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1712 err = -ERESTART;
1713 goto error;
1714 }
1715 if (nx == -EAGAIN ||
1716 genid != atomic_read(&flow_cache_genid)) {
1717 xfrm_pols_put(pols, npols);
1718 goto restart;
1719 }
1720 err = nx;
1721 }
1722 if (err < 0) {
1723 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1724 goto error;
1725 }
1726 }
1727 if (nx == 0) {
1728 /* Flow passes not transformed. */
1729 xfrm_pols_put(pols, npols);
1730 return 0;
1731 }
1732
1733 dst = xfrm_bundle_create(policy, xfrm, nx, fl, dst_orig);
1734 err = PTR_ERR(dst);
1735 if (IS_ERR(dst)) {
1736 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1737 goto error;
1738 }
1739
1740 for (pi = 0; pi < npols; pi++) {
1741 read_lock_bh(&pols[pi]->lock);
1742 pol_dead |= pols[pi]->walk.dead;
1743 read_unlock_bh(&pols[pi]->lock);
1744 }
1745
1746 write_lock_bh(&policy->lock);
1747 if (unlikely(pol_dead || stale_bundle(dst))) {
1748 /* Wow! While we worked on resolving, this
1749 * policy has gone. Retry. It is not paranoia,
1750 * we just cannot enlist new bundle to dead object.
1751 * We can't enlist stable bundles either.
1752 */
1753 write_unlock_bh(&policy->lock);
1754 dst_free(dst);
1755
1756 if (pol_dead)
1757 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLDEAD);
1758 else
1759 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1760 err = -EHOSTUNREACH;
1761 goto error;
1762 }
1763
1764 if (npols > 1)
1765 err = xfrm_dst_update_parent(dst, &pols[1]->selector);
1766 else
1767 err = xfrm_dst_update_origin(dst, fl);
1768 if (unlikely(err)) {
1769 write_unlock_bh(&policy->lock);
1770 dst_free(dst);
1771 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1772 goto error;
1773 }
1774
1775 dst->next = policy->bundles;
1776 policy->bundles = dst;
1777 dst_hold(dst);
1778 write_unlock_bh(&policy->lock);
1779 } 1859 }
1780 *dst_p = dst; 1860ok:
1781 dst_release(dst_orig); 1861 xfrm_pols_put(pols, drop_pols);
1782 xfrm_pols_put(pols, npols);
1783 return 0; 1862 return 0;
1784 1863
1864nopol:
1865 if (!(flags & XFRM_LOOKUP_ICMP))
1866 goto ok;
1867 err = -ENOENT;
1785error: 1868error:
1786 xfrm_pols_put(pols, npols); 1869 dst_release(dst);
1787dropdst: 1870dropdst:
1788 dst_release(dst_orig); 1871 dst_release(dst_orig);
1789 *dst_p = NULL; 1872 *dst_p = NULL;
1873 xfrm_pols_put(pols, drop_pols);
1790 return err; 1874 return err;
1791
1792nopol:
1793 err = -ENOENT;
1794 if (flags & XFRM_LOOKUP_ICMP)
1795 goto dropdst;
1796 return 0;
1797} 1875}
1798EXPORT_SYMBOL(__xfrm_lookup); 1876EXPORT_SYMBOL(__xfrm_lookup);
1799 1877
@@ -1952,9 +2030,16 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
1952 } 2030 }
1953 } 2031 }
1954 2032
1955 if (!pol) 2033 if (!pol) {
1956 pol = flow_cache_lookup(net, &fl, family, fl_dir, 2034 struct flow_cache_object *flo;
1957 xfrm_policy_lookup); 2035
2036 flo = flow_cache_lookup(net, &fl, family, fl_dir,
2037 xfrm_policy_lookup, NULL);
2038 if (IS_ERR_OR_NULL(flo))
2039 pol = ERR_CAST(flo);
2040 else
2041 pol = container_of(flo, struct xfrm_policy, flo);
2042 }
1958 2043
1959 if (IS_ERR(pol)) { 2044 if (IS_ERR(pol)) {
1960 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 2045 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
@@ -2138,71 +2223,24 @@ static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2138 return dst; 2223 return dst;
2139} 2224}
2140 2225
2141static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p) 2226static void __xfrm_garbage_collect(struct net *net)
2142{
2143 struct dst_entry *dst, **dstp;
2144
2145 write_lock(&pol->lock);
2146 dstp = &pol->bundles;
2147 while ((dst=*dstp) != NULL) {
2148 if (func(dst)) {
2149 *dstp = dst->next;
2150 dst->next = *gc_list_p;
2151 *gc_list_p = dst;
2152 } else {
2153 dstp = &dst->next;
2154 }
2155 }
2156 write_unlock(&pol->lock);
2157}
2158
2159static void xfrm_prune_bundles(struct net *net, int (*func)(struct dst_entry *))
2160{ 2227{
2161 struct dst_entry *gc_list = NULL; 2228 struct dst_entry *head, *next;
2162 int dir;
2163 2229
2164 read_lock_bh(&xfrm_policy_lock); 2230 flow_cache_flush();
2165 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2166 struct xfrm_policy *pol;
2167 struct hlist_node *entry;
2168 struct hlist_head *table;
2169 int i;
2170 2231
2171 hlist_for_each_entry(pol, entry, 2232 spin_lock_bh(&xfrm_policy_sk_bundle_lock);
2172 &net->xfrm.policy_inexact[dir], bydst) 2233 head = xfrm_policy_sk_bundles;
2173 prune_one_bundle(pol, func, &gc_list); 2234 xfrm_policy_sk_bundles = NULL;
2235 spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
2174 2236
2175 table = net->xfrm.policy_bydst[dir].table; 2237 while (head) {
2176 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { 2238 next = head->next;
2177 hlist_for_each_entry(pol, entry, table + i, bydst) 2239 dst_free(head);
2178 prune_one_bundle(pol, func, &gc_list); 2240 head = next;
2179 }
2180 }
2181 read_unlock_bh(&xfrm_policy_lock);
2182
2183 while (gc_list) {
2184 struct dst_entry *dst = gc_list;
2185 gc_list = dst->next;
2186 dst_free(dst);
2187 } 2241 }
2188} 2242}
2189 2243
2190static int unused_bundle(struct dst_entry *dst)
2191{
2192 return !atomic_read(&dst->__refcnt);
2193}
2194
2195static void __xfrm_garbage_collect(struct net *net)
2196{
2197 xfrm_prune_bundles(net, unused_bundle);
2198}
2199
2200static int xfrm_flush_bundles(struct net *net)
2201{
2202 xfrm_prune_bundles(net, stale_bundle);
2203 return 0;
2204}
2205
2206static void xfrm_init_pmtu(struct dst_entry *dst) 2244static void xfrm_init_pmtu(struct dst_entry *dst)
2207{ 2245{
2208 do { 2246 do {
@@ -2260,7 +2298,9 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
2260 return 0; 2298 return 0;
2261 if (dst->xfrm->km.state != XFRM_STATE_VALID) 2299 if (dst->xfrm->km.state != XFRM_STATE_VALID)
2262 return 0; 2300 return 0;
2263 if (xdst->genid != dst->xfrm->genid) 2301 if (xdst->xfrm_genid != dst->xfrm->genid)
2302 return 0;
2303 if (xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2264 return 0; 2304 return 0;
2265 2305
2266 if (strict && fl && 2306 if (strict && fl &&
@@ -2425,7 +2465,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
2425 2465
2426 switch (event) { 2466 switch (event) {
2427 case NETDEV_DOWN: 2467 case NETDEV_DOWN:
2428 xfrm_flush_bundles(dev_net(dev)); 2468 __xfrm_garbage_collect(dev_net(dev));
2429 } 2469 }
2430 return NOTIFY_DONE; 2470 return NOTIFY_DONE;
2431} 2471}
@@ -2531,7 +2571,6 @@ static void xfrm_policy_fini(struct net *net)
2531 audit_info.sessionid = -1; 2571 audit_info.sessionid = -1;
2532 audit_info.secid = 0; 2572 audit_info.secid = 0;
2533 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info); 2573 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
2534 flush_work(&xfrm_policy_gc_work);
2535 2574
2536 WARN_ON(!list_empty(&net->xfrm.policy_all)); 2575 WARN_ON(!list_empty(&net->xfrm.policy_all));
2537 2576
@@ -2757,7 +2796,6 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
2757 struct xfrm_migrate *m, int num_migrate) 2796 struct xfrm_migrate *m, int num_migrate)
2758{ 2797{
2759 struct xfrm_migrate *mp; 2798 struct xfrm_migrate *mp;
2760 struct dst_entry *dst;
2761 int i, j, n = 0; 2799 int i, j, n = 0;
2762 2800
2763 write_lock_bh(&pol->lock); 2801 write_lock_bh(&pol->lock);
@@ -2782,10 +2820,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
2782 sizeof(pol->xfrm_vec[i].saddr)); 2820 sizeof(pol->xfrm_vec[i].saddr));
2783 pol->xfrm_vec[i].encap_family = mp->new_family; 2821 pol->xfrm_vec[i].encap_family = mp->new_family;
2784 /* flush bundles */ 2822 /* flush bundles */
2785 while ((dst = pol->bundles) != NULL) { 2823 atomic_inc(&pol->genid);
2786 pol->bundles = dst->next;
2787 dst_free(dst);
2788 }
2789 } 2824 }
2790 } 2825 }
2791 2826
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 17d5b96f2fc8..5208b12fbfb4 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -22,6 +22,7 @@
22#include <linux/audit.h> 22#include <linux/audit.h>
23#include <asm/uaccess.h> 23#include <asm/uaccess.h>
24#include <linux/ktime.h> 24#include <linux/ktime.h>
25#include <linux/slab.h>
25#include <linux/interrupt.h> 26#include <linux/interrupt.h>
26#include <linux/kernel.h> 27#include <linux/kernel.h>
27 28
@@ -37,7 +38,6 @@
37static DEFINE_SPINLOCK(xfrm_state_lock); 38static DEFINE_SPINLOCK(xfrm_state_lock);
38 39
39static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; 40static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
40static unsigned int xfrm_state_genid;
41 41
42static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); 42static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
43static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); 43static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
@@ -923,8 +923,6 @@ static void __xfrm_state_insert(struct xfrm_state *x)
923 struct net *net = xs_net(x); 923 struct net *net = xs_net(x);
924 unsigned int h; 924 unsigned int h;
925 925
926 x->genid = ++xfrm_state_genid;
927
928 list_add(&x->km.all, &net->xfrm.state_all); 926 list_add(&x->km.all, &net->xfrm.state_all);
929 927
930 h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr, 928 h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
@@ -970,7 +968,7 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
970 (mark & x->mark.m) == x->mark.v && 968 (mark & x->mark.m) == x->mark.v &&
971 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) && 969 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
972 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family)) 970 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
973 x->genid = xfrm_state_genid; 971 x->genid++;
974 } 972 }
975} 973}
976 974
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
index 2c4d6cdcba49..05640bc9594b 100644
--- a/net/xfrm/xfrm_sysctl.c
+++ b/net/xfrm/xfrm_sysctl.c
@@ -1,4 +1,5 @@
1#include <linux/sysctl.h> 1#include <linux/sysctl.h>
2#include <linux/slab.h>
2#include <net/net_namespace.h> 3#include <net/net_namespace.h>
3#include <net/xfrm.h> 4#include <net/xfrm.h>
4 5
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 6106b72826d3..a267fbdda525 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1741,6 +1741,10 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1741 if (err) 1741 if (err)
1742 return err; 1742 return err;
1743 1743
1744 err = verify_policy_dir(p->dir);
1745 if (err)
1746 return err;
1747
1744 if (p->index) 1748 if (p->index)
1745 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err); 1749 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
1746 else { 1750 else {
@@ -1766,13 +1770,9 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1766 if (xp == NULL) 1770 if (xp == NULL)
1767 return -ENOENT; 1771 return -ENOENT;
1768 1772
1769 read_lock(&xp->lock); 1773 if (unlikely(xp->walk.dead))
1770 if (xp->walk.dead) {
1771 read_unlock(&xp->lock);
1772 goto out; 1774 goto out;
1773 }
1774 1775
1775 read_unlock(&xp->lock);
1776 err = 0; 1776 err = 0;
1777 if (up->hard) { 1777 if (up->hard) {
1778 uid_t loginuid = NETLINK_CB(skb).loginuid; 1778 uid_t loginuid = NETLINK_CB(skb).loginuid;